bfgminer-bfgminer-3.10.0/

bfgminer-bfgminer-3.10.0/.gitignore
bfgminer
bfgminer.exe
bfgminer-rpc
bfgminer-rpc.exe
bitforce-firmware-flash
cgminer
cgminer.exe
minerd
minerd.exe
*.o
*.bin
autom4te.cache
.deps
Makefile
Makefile.in
INSTALL
aclocal.m4
configure
depcomp
missing
install-sh
stamp-h1
cpuminer-config.h*
compile
config.log
config.status
config.guess
config.sub
70-bfgminer.rules
mingw32-config.cache
*~
*.orig
*.rej
*.swp
*.kate-swp
ext_deps
config.h.in
config.h
ccan/libccan.a
lib/arg-nonnull.h
lib/c++defs.h
lib/libgnu.a
lib/signal.h
lib/string.h
lib/stdint.h
lib/warn-on-use.h
iospeeds_local.h
mkinstalldirs
.pc
bfgminer-bitforce.conf
vgcore.*
core*
*.log
*.json
*.patch
*.zip
*.tbz2
*.bz2

bfgminer-bfgminer-3.10.0/.gitmodules
[submodule "libblkmaker"]
	path = libblkmaker
	url = git://gitorious.org/bitcoin/libblkmaker.git

bfgminer-bfgminer-3.10.0/70-bfgminer.rules.in
# do not edit this file, it will be overwritten on update
ACTION=="add", SUBSYSTEMS=="usb", GOTO="bfgminer_start"
GOTO="bfgminer_end"
LABEL="bfgminer_start"
@USE_BIFURY_TRUE@ENV{ID_MODEL}=="*bi•fury*", GOTO="bfgminer_add"
@HAS_BIGPIC_TRUE@ENV{ID_MODEL}=="*Bitfury*BF1*", GOTO="bfgminer_add"
@HAS_BITFORCE_TRUE@ENV{ID_MODEL}=="*BitFORCE*SHA256*", GOTO="bfgminer_add"
@USE_DRILLBIT_TRUE@ENV{manufacturer}=="*Drillbit*", GOTO="bfgminer_add"
@HAS_ICARUS_TRUE@ENV{ID_MODEL}=="*Cairnsmore1*", GOTO="bfgminer_add"
@HAS_ICARUS_TRUE@ENV{ID_MODEL}=="*Block*Erupter*", GOTO="bfgminer_add"
@USE_HASHBUSTER_TRUE@ENV{ID_MODEL}=="*HashBuster*", GOTO="bfgminer_add"
@USE_HASHBUSTERUSB_TRUE@ENV{ID_MODEL}=="*HashBuster*", GOTO="bfgminer_add"
@USE_HASHFAST_TRUE@ENV{idVendor}=="297c", ENV{manufacturer}=="*HashFast*", GOTO="bfgminer_add"
@HAS_KLONDIKE_TRUE@ENV{idVendor}=="04d8", ENV{idProduct}=="f60a", ENV{manufacturer}=="*Klondike*", GOTO="bfgminer_add"
@HAS_LITTLEFURY_TRUE@ENV{ID_MODEL}=="*LittleFury*", GOTO="bfgminer_add"
@HAS_MODMINER_TRUE@ENV{ID_MODEL}=="*ModMiner*", GOTO="bfgminer_add"
@HAS_NANOFURY_TRUE@ENV{idVendor}=="04d8", ENV{idProduct}=="00de", ENV{ID_MODEL}=="*NanoFury*", GOTO="bfgminer_add"
@HAS_TWINFURY_TRUE@ENV{ID_MODEL}=="*Twinfury*", GOTO="bfgminer_add"
@HAS_X6500_TRUE@ENV{idVendor}=="0403", ENV{idProduct}=="6001", ENV{ID_MODEL}=="*X6500 FPGA Miner*", GOTO="bfgminer_add"
@HAS_ZTEX_TRUE@ENV{ID_MODEL}=="*btcminer for ZTEX*", GOTO="bfgminer_add"
GOTO="bfgminer_end"
LABEL="bfgminer_add"
GROUP="video"
LABEL="bfgminer_end"

bfgminer-bfgminer-3.10.0/ADL/

bfgminer-bfgminer-3.10.0/ADL/adl_defines.h
/* The statements-of-fact provided herein are intended to be compatible with
 * AMD ADL's library. AMD is the creator and copyright holder of the ADL
 * library this interface describes, and therefore also defined this interface
 * originally.
 * These free interfaces were created by Luke Dashjr
 * As interfaces/APIs cannot be copyrighted, there is no license needed in the
 * USA and probably many other jurisdictions.
* If your jurisdiction rules otherwise, the header is offered by Luke Dashjr * under the MIT license, but you are responsible for determining who your * jurisdiction considers to be the copyright holder in such a case. * * THE INFORMATION IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE INFORMATION OR THE USE OR OTHER DEALINGS IN THE * INFORMATION. */ #ifndef ADL_DEFINES_H_ #define ADL_DEFINES_H_ enum { ADL_FALSE = 0, ADL_TRUE = 1, }; #define ADL_MAX_CHAR 4096 #define ADL_MAX_PATH 256 #define ADL_MAX_ADAPTERS 150 #define ADL_MAX_DISPLAYS 150 #define ADL_MAX_DEVICENAME 32 #define ADL_ADAPTER_INDEX_ALL -1 #define ADL_MAIN_API_OPTION_NONE 0 enum { ADL_DDC_OPTION_SWITCHDDC2 = 1, ADL_DDC_OPTION_RESTORECOMMAND = 2, }; enum { ADL_DL_I2C_ACTIONREAD = 1, ADL_DL_I2C_ACTIONWRITE = 2, ADL_DL_I2C_ACTIONREAD_REPEATEDSTART = 3, }; enum { ADL_OK_WAIT = 4, ADL_OK_RESTART = 3, ADL_OK_MODE_CHANGE = 2, ADL_OK_WARNING = 1, ADL_OK = 0, ADL_ERR = -1, ADL_ERR_NOT_INIT = -2, ADL_ERR_INVALID_PARAM = -3, ADL_ERR_INVALID_PARAM_SIZE = -4, ADL_ERR_INVALID_ADL_IDX = -5, ADL_ERR_INVALID_CONTROLLER_IDX = -6, ADL_ERR_INVALID_DIPLAY_IDX = -7, ADL_ERR_NOT_SUPPORTED = -8, ADL_ERR_NULL_POINTER = -9, ADL_ERR_DISABLED_ADAPTER = -10, ADL_ERR_INVALID_CALLBACK = -11, ADL_ERR_RESOURCE_CONFLICT = -12, }; enum { ADL_DT_MONITOR = 0, ADL_DT_TELEVISION = 1, ADL_DT_LCD_PANEL = 2, ADL_DT_DIGITAL_FLAT_PANEL = 3, ADL_DT_COMPONENT_VIDEO = 4, ADL_DT_PROJECTOR = 5, }; enum { ADL_DOT_UNKNOWN = 0, ADL_DOT_COMPOSITE = 1, ADL_DOT_SVIDEO = 2, ADL_DOT_ANALOG = 3, ADL_DOT_DIGITAL = 4, }; enum { ADL_DISPLAY_COLOR_BRIGHTNESS = (1 << 0), ADL_DISPLAY_COLOR_CONTRAST = (1 << 1), ADL_DISPLAY_COLOR_SATURATION = (1 << 2), ADL_DISPLAY_COLOR_HUE = (1 << 3), ADL_DISPLAY_COLOR_TEMPERATURE = (1 << 4), ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_EDID = (1 << 5), ADL_DISPLAY_COLOR_TEMPERATURE_SOURCE_USER = (1 << 6), }; enum { ADL_DISPLAY_ADJUST_OVERSCAN = (1 << 0), ADL_DISPLAY_ADJUST_VERT_POS = (1 << 1), ADL_DISPLAY_ADJUST_HOR_POS = (1 << 2), ADL_DISPLAY_ADJUST_VERT_SIZE = (1 << 3), ADL_DISPLAY_ADJUST_HOR_SIZE = (1 << 4), ADL_DISPLAY_ADJUST_SIZEPOS = (ADL_DISPLAY_ADJUST_VERT_POS | ADL_DISPLAY_ADJUST_HOR_POS | ADL_DISPLAY_ADJUST_VERT_SIZE | ADL_DISPLAY_ADJUST_HOR_SIZE), ADL_DISPLAY_CUSTOMMODES = (1 << 5), ADL_DISPLAY_ADJUST_UNDERSCAN = (1 << 6), }; enum { ADL_DESKTOPCONFIG_UNKNOWN = 0, ADL_DESKTOPCONFIG_SINGLE = (1 << 0), ADL_DESKTOPCONFIG_CLONE = (1 << 2), ADL_DESKTOPCONFIG_BIGDESK_H = (1 << 4), ADL_DESKTOPCONFIG_BIGDESK_V = (1 << 5), ADL_DESKTOPCONFIG_BIGDESK_HR = (1 << 6), ADL_DESKTOPCONFIG_BIGDESK_VR = (1 << 7), ADL_DESKTOPCONFIG_RANDR12 = (1 << 8), }; #define ADL_MAX_DISPLAY_NAME 256 enum { ADL_DISPLAYDDCINFOEX_FLAG_PROJECTORDEVICE = (1 << 0), ADL_DISPLAYDDCINFOEX_FLAG_EDIDEXTENSION = (1 << 1), ADL_DISPLAYDDCINFOEX_FLAG_DIGITALDEVICE = (1 << 2), ADL_DISPLAYDDCINFOEX_FLAG_HDMIAUDIODEVICE = (1 << 3), ADL_DISPLAYDDCINFOEX_FLAG_SUPPORTS_AI = (1 << 4), ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC601 = (1 << 5), ADL_DISPLAYDDCINFOEX_FLAG_SUPPORT_xvYCC709 = (1 << 6), }; enum { ADL_DISPLAY_CONTYPE_UNKNOWN = 0, ADL_DISPLAY_CONTYPE_VGA = 1, ADL_DISPLAY_CONTYPE_DVI_D = 2, ADL_DISPLAY_CONTYPE_DVI_I = 3, ADL_DISPLAY_CONTYPE_ATICVDONGLE_NTSC = 4, 
ADL_DISPLAY_CONTYPE_ATICVDONGLE_JPN = 5, ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_JPN = 6, ADL_DISPLAY_CONTYPE_ATICVDONGLE_NONI2C_NTSC = 7, ADL_DISPLAY_CONTYPE_HDMI_TYPE_A = 10, ADL_DISPLAY_CONTYPE_HDMI_TYPE_B = 11, ADL_DISPLAY_CONTYPE_SVIDEO = 12, ADL_DISPLAY_CONTYPE_COMPOSITE = 13, ADL_DISPLAY_CONTYPE_RCA_3COMPONENT = 14, ADL_DISPLAY_CONTYPE_DISPLAYPORT = 15, }; enum { ADL_TV_STANDARDS = (1 << 0), ADL_TV_SCART = (1 << 1), }; enum { ADL_STANDARD_NTSC_M = (1 << 0), ADL_STANDARD_NTSC_JPN = (1 << 1), ADL_STANDARD_NTSC_N = (1 << 2), ADL_STANDARD_PAL_B = (1 << 3), ADL_STANDARD_PAL_COMB_N = (1 << 4), ADL_STANDARD_PAL_D = (1 << 5), ADL_STANDARD_PAL_G = (1 << 6), ADL_STANDARD_PAL_H = (1 << 7), ADL_STANDARD_PAL_I = (1 << 8), ADL_STANDARD_PAL_K = (1 << 9), ADL_STANDARD_PAL_K1 = (1 << 10), ADL_STANDARD_PAL_L = (1 << 11), ADL_STANDARD_PAL_M = (1 << 12), ADL_STANDARD_PAL_N = (1 << 13), ADL_STANDARD_PAL_SECAM_D = (1 << 14), ADL_STANDARD_PAL_SECAM_K = (1 << 15), ADL_STANDARD_PAL_SECAM_K1 = (1 << 16), ADL_STANDARD_PAL_SECAM_L = (1 << 17), }; enum { ADL_CUSTOMIZEDMODEFLAG_MODESUPPORTED = (1 << 0), ADL_CUSTOMIZEDMODEFLAG_NOTDELETETABLE = (1 << 1), ADL_CUSTOMIZEDMODEFLAG_INSERTBYDRIVER = (1 << 2), ADL_CUSTOMIZEDMODEFLAG_INTERLACED = (1 << 3), ADL_CUSTOMIZEDMODEFLAG_BASEMODE = (1 << 4), }; enum { ADL_DISPLAY_CV_DONGLE_D1 = (1 << 0), ADL_DISPLAY_CV_DONGLE_D2 = (1 << 1), ADL_DISPLAY_CV_DONGLE_D3 = (1 << 2), ADL_DISPLAY_CV_DONGLE_D4 = (1 << 3), ADL_DISPLAY_CV_DONGLE_D5 = (1 << 4), }; enum { ADL_DISPLAY_CV_DONGLE_480I = (1 << 0), ADL_DISPLAY_CV_DONGLE_480P = (1 << 1), ADL_DISPLAY_CV_DONGLE_540P = (1 << 2), ADL_DISPLAY_CV_DONGLE_720P = (1 << 3), ADL_DISPLAY_CV_DONGLE_1080I = (1 << 4), ADL_DISPLAY_CV_DONGLE_1080P = (1 << 5), ADL_DISPLAY_CV_DONGLE_16_9 = (1 << 6), ADL_DISPLAY_CV_DONGLE_720P50 = (1 << 7), ADL_DISPLAY_CV_DONGLE_1080I25 = (1 << 8), ADL_DISPLAY_CV_DONGLE_576I25 = (1 << 9), ADL_DISPLAY_CV_DONGLE_576P50 = (1 << 10), ADL_DISPLAY_CV_DONGLE_1080P24 = (1 << 11), ADL_DISPLAY_CV_DONGLE_1080P25 = (1 << 12), ADL_DISPLAY_CV_DONGLE_1080P30 = (1 << 13), ADL_DISPLAY_CV_DONGLE_1080P50 = (1 << 14), }; enum { ADL_DISPLAY_FORMAT_FORCE_720P = (1 << 0), ADL_DISPLAY_FORMAT_FORCE_1080I = (1 << 1), ADL_DISPLAY_FORMAT_FORCE_1080P = (1 << 2), ADL_DISPLAY_FORMAT_FORCE_720P50 = (1 << 3), ADL_DISPLAY_FORMAT_FORCE_1080I25 = (1 << 4), ADL_DISPLAY_FORMAT_FORCE_576I25 = (1 << 5), ADL_DISPLAY_FORMAT_FORCE_576P50 = (1 << 6), ADL_DISPLAY_FORMAT_FORCE_1080P24 = (1 << 7), ADL_DISPLAY_FORMAT_FORCE_1080P25 = (1 << 8), ADL_DISPLAY_FORMAT_FORCE_1080P30 = (1 << 9), ADL_DISPLAY_FORMAT_FORCE_1080P50 = (1 << 10), }; enum { ADL_DISPLAY_FORMAT_CVDONGLEOVERIDE = (1 << 0), ADL_DISPLAY_FORMAT_CVMODEUNDERSCAN = (1 << 1), ADL_DISPLAY_FORMAT_FORCECONNECT_SUPPORTED = (1 << 2), ADL_DISPLAY_FORMAT_RESTRICT_FORMAT_SELECTION = (1 << 3), ADL_DISPLAY_FORMAT_SETASPECRATIO = (1 << 4), ADL_DISPLAY_FORMAT_FORCEMODES = (1 << 5), ADL_DISPLAY_FORMAT_LCDRTCCOEFF = (1 << 6), }; enum { ADL_PM_PARAM_DONT_CHANGE = 0, }; enum { ADL_BUSTYPE_PCI = 0, ADL_BUSTYPE_AGP = 1, ADL_BUSTYPE_PCIE = 2, ADL_BUSTYPE_PCIE_GEN2 = 3, }; enum { ADL_STEREO_OFF = 0, ADL_STEREO_ACTIVE = (1 << 1), ADL_STEREO_SUPPORTED = (1 << 2), ADL_STEREO_BLUE_LINE = (1 << 3), ADL_STEREO_PASSIVE = (1 << 6), ADL_STEREO_PASSIVE_HORIZ = (1 << 7), ADL_STEREO_PASSIVE_VERT = (1 << 8), ADL_STEREO_AUTO_HORIZONTAL = (1 << 30), ADL_STEREO_AUTO_VERTICAL = (1 << 31), }; enum { ADL_WORKSTATION_LOADBALANCING_SUPPORTED = 1, ADL_WORKSTATION_LOADBALANCING_AVAILABLE = 2, }; enum { ADL_WORKSTATION_LOADBALANCING_DISABLED = 0, 
ADL_WORKSTATION_LOADBALANCING_ENABLED = 1, }; enum { ADL_CONTEXT_SPEED_UNFORCED = 0, ADL_CONTEXT_SPEED_FORCEHIGH = 1, ADL_CONTEXT_SPEED_FORCELOW = 2, }; enum { ADL_ADAPTER_SPEEDCAPS_SUPPORTED = (1 << 0), }; enum { ADL_GLSYNC_PORT_UNKNOWN = 0, ADL_GLSYNC_PORT_BNC = 1, ADL_GLSYNC_PORT_RJ45PORT1 = 2, ADL_GLSYNC_PORT_RJ45PORT2 = 3, }; enum { ADL_GLSYNC_CONFIGMASK_NONE = 0, ADL_GLSYNC_CONFIGMASK_SIGNALSOURCE = (1 << 0), ADL_GLSYNC_CONFIGMASK_SYNCFIELD = (1 << 1), ADL_GLSYNC_CONFIGMASK_SAMPLERATE = (1 << 2), ADL_GLSYNC_CONFIGMASK_SYNCDELAY = (1 << 3), ADL_GLSYNC_CONFIGMASK_TRIGGEREDGE = (1 << 4), ADL_GLSYNC_CONFIGMASK_SCANRATECOEFF = (1 << 5), ADL_GLSYNC_CONFIGMASK_FRAMELOCKCNTL = (1 << 6), }; enum { ADL_GLSYNC_FRAMELOCKCNTL_NONE = 0, ADL_GLSYNC_FRAMELOCKCNTL_ENABLE = (1 << 0), ADL_GLSYNC_FRAMELOCKCNTL_DISABLE = (1 << 1), ADL_GLSYNC_FRAMELOCKCNTL_SWAP_COUNTER_RESET = (1 << 2), ADL_GLSYNC_FRAMELOCKCNTL_SWAP_COUNTER_ACK = (1 << 3), ADL_GLSYNC_FRAMELOCKCNTL_STATE_ENABLE = (1 << 0), }; enum { ADL_GLSYNC_COUNTER_SWAP = (1 << 0), }; enum { ADL_GLSYNC_SIGNALSOURCE_UNDEFINED = 0x0100, ADL_GLSYNC_SIGNALSOURCE_FREERUN = 0x0101, ADL_GLSYNC_SIGNALSOURCE_BNCPORT = 0x0102, ADL_GLSYNC_SIGNALSOURCE_RJ45PORT1 = 0x0103, ADL_GLSYNC_SIGNALSOURCE_RJ45PORT2 = 0x0104, }; enum { ADL_GLSYNC_SIGNALTYPE_UNDEFINED = 0, ADL_GLSYNC_SIGNALTYPE_480I = 1, ADL_GLSYNC_SIGNALTYPE_576I = 2, ADL_GLSYNC_SIGNALTYPE_480P = 3, ADL_GLSYNC_SIGNALTYPE_576P = 4, ADL_GLSYNC_SIGNALTYPE_720P = 5, ADL_GLSYNC_SIGNALTYPE_1080P = 6, ADL_GLSYNC_SIGNALTYPE_1080I = 7, ADL_GLSYNC_SIGNALTYPE_SDI = 8, ADL_GLSYNC_SIGNALTYPE_TTL = 9, ADL_GLSYNC_SIGNALTYPE_ANALOG = 10, }; enum { ADL_GLSYNC_SYNCFIELD_UNDEFINED = 0, ADL_GLSYNC_SYNCFIELD_BOTH = 1, ADL_GLSYNC_SYNCFIELD_1 = 2, }; enum { ADL_GLSYNC_TRIGGEREDGE_UNDEFINED = 0, ADL_GLSYNC_TRIGGEREDGE_RISING = 1, ADL_GLSYNC_TRIGGEREDGE_FALLING = 2, ADL_GLSYNC_TRIGGEREDGE_BOTH = 3, }; enum { ADL_GLSYNC_SCANRATECOEFF_UNDEFINED = 0, ADL_GLSYNC_SCANRATECOEFF_x5 = 1, ADL_GLSYNC_SCANRATECOEFF_x4 = 2, ADL_GLSYNC_SCANRATECOEFF_x3 = 3, ADL_GLSYNC_SCANRATECOEFF_x5_DIV_2 = 4, ADL_GLSYNC_SCANRATECOEFF_x2 = 5, ADL_GLSYNC_SCANRATECOEFF_x3_DIV_2 = 6, ADL_GLSYNC_SCANRATECOEFF_x5_DIV_4 = 7, ADL_GLSYNC_SCANRATECOEFF_x1 = 8, ADL_GLSYNC_SCANRATECOEFF_x4_DIV_5 = 9, ADL_GLSYNC_SCANRATECOEFF_x2_DIV_3 = 10, ADL_GLSYNC_SCANRATECOEFF_x1_DIV_2 = 11, ADL_GLSYNC_SCANRATECOEFF_x2_DIV_5 = 12, ADL_GLSYNC_SCANRATECOEFF_x1_DIV_3 = 13, ADL_GLSYNC_SCANRATECOEFF_x1_DIV_4 = 14, ADL_GLSYNC_SCANRATECOEFF_x1_DIV_5 = 15, }; enum { ADL_GLSYNC_PORTSTATE_UNDEFINED = 0, ADL_GLSYNC_PORTSTATE_NOCABLE = 1, ADL_GLSYNC_PORTSTATE_IDLE = 2, ADL_GLSYNC_PORTSTATE_INPUT = 3, ADL_GLSYNC_PORTSTATE_OUTPUT = 4, }; enum { ADL_GLSYNC_LEDTYPE_BNC = 0, ADL_GLSYNC_LEDTYPE_RJ45_LEFT = 0, ADL_GLSYNC_LEDTYPE_RJ45_RIGHT = 1, }; enum { ADL_GLSYNC_LEDCOLOR_UNDEFINED = 0, ADL_GLSYNC_LEDCOLOR_NOLIGHT = 1, ADL_GLSYNC_LEDCOLOR_YELLOW = 2, ADL_GLSYNC_LEDCOLOR_RED = 3, ADL_GLSYNC_LEDCOLOR_GREEN = 4, ADL_GLSYNC_LEDCOLOR_FLASH_GREEN = 5, }; enum { ADL_GLSYNC_PORTCNTL_NONE = 0, ADL_GLSYNC_PORTCNTL_OUTPUT = 1, }; enum { ADL_GLSYNC_MODECNTL_NONE = 0, ADL_GLSYNC_MODECNTL_GENLOCK = 1, ADL_GLSYNC_MODECNTL_TIMINGSERVER = 2, }; enum { ADL_GLSYNC_MODECNTL_STATUS_NONE = 0, ADL_GLSYNC_MODECNTL_STATUS_GENLOCK = (1 << 0), ADL_GLSYNC_MODECNTL_STATUS_SETMODE_REQUIRED = (1 << 1), ADL_GLSYNC_MODECNTL_STATUS_GENLOCK_ALLOWED = (1 << 2), }; #define ADL_MAX_GLSYNC_PORTS 8 #define ADL_MAX_GLSYNC_PORT_LEDS 8 enum { ADL_XFIREX_STATE_NOINTERCONNECT = (1 << 0), ADL_XFIREX_STATE_DOWNGRADEPIPES = (1 << 1), 
ADL_XFIREX_STATE_DOWNGRADEMEM = (1 << 2), ADL_XFIREX_STATE_REVERSERECOMMENDED = (1 << 3), ADL_XFIREX_STATE_3DACTIVE = (1 << 4), ADL_XFIREX_STATE_MASTERONSLAVE = (1 << 5), ADL_XFIREX_STATE_NODISPLAYCONNECT = (1 << 6), ADL_XFIREX_STATE_NOPRIMARYVIEW = (1 << 7), ADL_XFIREX_STATE_DOWNGRADEVISMEM = (1 << 8), ADL_XFIREX_STATE_LESSTHAN8LANE_MASTER = (1 << 9), ADL_XFIREX_STATE_LESSTHAN8LANE_SLAVE = (1 << 10), ADL_XFIREX_STATE_PEERTOPEERFAILED = (1 << 11), ADL_XFIREX_STATE_MEMISDOWNGRADED = (1 << 16), ADL_XFIREX_STATE_PIPESDOWNGRADED = (1 << 17), ADL_XFIREX_STATE_XFIREXACTIVE = (1 << 18), ADL_XFIREX_STATE_VISMEMISDOWNGRADED = (1 << 19), ADL_XFIREX_STATE_INVALIDINTERCONNECTION = (1 << 20), ADL_XFIREX_STATE_NONP2PMODE = (1 << 21), ADL_XFIREX_STATE_DOWNGRADEMEMBANKS = (1 << 22), ADL_XFIREX_STATE_MEMBANKSDOWNGRADED = (1 << 23), ADL_XFIREX_STATE_DUALDISPLAYSALLOWED = (1 << 24), ADL_XFIREX_STATE_P2P_APERTURE_MAPPING = (1 << 25), ADL_XFIREX_STATE_P2PFLUSH_REQUIRED = ADL_XFIREX_STATE_P2P_APERTURE_MAPPING, ADL_XFIREX_STATE_XSP_CONNECTED = (1 << 26), ADL_XFIREX_STATE_ENABLE_CF_REBOOT_REQUIRED = (1 << 27), ADL_XFIREX_STATE_DISABLE_CF_REBOOT_REQUIRED = (1 << 28), ADL_XFIREX_STATE_DRV_HANDLE_DOWNGRADE_KEY = (1 << 29), ADL_XFIREX_STATE_CF_RECONFIG_REQUIRED = (1 << 30), ADL_XFIREX_STATE_ERRORGETTINGSTATUS = (1 << 31), }; enum { ADL_DISPLAY_PIXELFORMAT_UNKNOWN = 0, ADL_DISPLAY_PIXELFORMAT_RGB = (1 << 0), ADL_DISPLAY_PIXELFORMAT_YCRCB444 = (1 << 1), ADL_DISPLAY_PIXELFORMAT_YCRCB422 = (1 << 2), ADL_DISPLAY_PIXELFORMAT_RGB_LIMITED_RANGE = (1 << 3), ADL_DISPLAY_PIXELFORMAT_RGB_FULL_RANGE = ADL_DISPLAY_PIXELFORMAT_RGB }; enum { ADL_DL_DISPLAYCONFIG_CONTYPE_UNKNOWN = 0, ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_JP = 1, ADL_DL_DISPLAYCONFIG_CONTYPE_CV_JPN = 2, ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NA = 3, ADL_DL_DISPLAYCONFIG_CONTYPE_CV_NONI2C_NA = 4, ADL_DL_DISPLAYCONFIG_CONTYPE_VGA = 5, ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_D = 6, ADL_DL_DISPLAYCONFIG_CONTYPE_DVI_I = 7, ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_A = 8, ADL_DL_DISPLAYCONFIG_CONTYPE_HDMI_TYPE_B = 9, ADL_DL_DISPLAYCONFIG_CONTYPE_DISPLAYPORT = 10, }; enum { ADL_DISPLAY_DISPLAYINFO_DISPLAYCONNECTED = (1 << 0), ADL_DISPLAY_DISPLAYINFO_DISPLAYMAPPED = (1 << 1), ADL_DISPLAY_DISPLAYINFO_NONLOCAL = (1 << 2), ADL_DISPLAY_DISPLAYINFO_FORCIBLESUPPORTED = (1 << 3), ADL_DISPLAY_DISPLAYINFO_GENLOCKSUPPORTED = (1 << 4), ADL_DISPLAY_DISPLAYINFO_MULTIVPU_SUPPORTED = (1 << 5), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_SINGLE = (1 << 8), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_CLONE = (1 << 9), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2VSTRETCH = (1 << 10), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_2HSTRETCH = (1 << 11), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_EXTENDED = (1 << 12), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCH1GPU = (1 << 16), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_NSTRETCHNGPU = (1 << 17), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED2 = (1 << 18), ADL_DISPLAY_DISPLAYINFO_MANNER_SUPPORTED_RESERVED3 = (1 << 19), }; enum { ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NOTACTIVE = (1 << 0), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_SINGLE = (1 << 1), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_CLONE = (1 << 2), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCH1GPU = (1 << 3), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_NSTRETCHNGPU = (1 << 4), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2VSTRETCH = (1 << 5), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_2HSTRETCH = (1 << 6), ADL_ADAPTER_DISPLAYCAP_MANNER_SUPPORTED_EXTENDED = (1 << 7), ADL_ADAPTER_DISPLAYCAP_PREFERDISPLAY_SUPPORTED = (1 << 8), 
ADL_ADAPTER_DISPLAYCAP_BEZEL_SUPPORTED = (1 << 9), }; enum { ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED = (1 << 0), ADL_DISPLAY_DISPLAYMAP_MANNER_NOTACTIVE = (1 << 1), ADL_DISPLAY_DISPLAYMAP_MANNER_SINGLE = (1 << 2), ADL_DISPLAY_DISPLAYMAP_MANNER_CLONE = (1 << 3), ADL_DISPLAY_DISPLAYMAP_MANNER_RESERVED1 = (1 << 4), ADL_DISPLAY_DISPLAYMAP_MANNER_HSTRETCH = (1 << 5), ADL_DISPLAY_DISPLAYMAP_MANNER_VSTRETCH = (1 << 6), ADL_DISPLAY_DISPLAYMAP_MANNER_VLD = (1 << 7), }; enum { ADL_DISPLAY_DISPLAYMAP_OPTION_GPUINFO = 1, }; enum { ADL_DISPLAY_DISPLAYTARGET_PREFERRED = 1, }; enum { ADL_DISPLAY_POSSIBLEMAPRESULT_VALID = 1, ADL_DISPLAY_POSSIBLEMAPRESULT_BEZELSUPPORTED = 2, }; enum { ADL_DISPLAY_MODE_COLOURFORMAT_565 = (1 << 0), ADL_DISPLAY_MODE_COLOURFORMAT_8888 = (1 << 1), ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_000 = (1 << 2), ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_090 = (1 << 3), ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_180 = (1 << 4), ADL_DISPLAY_MODE_ORIENTATION_SUPPORTED_270 = (1 << 5), ADL_DISPLAY_MODE_REFRESHRATE_ROUNDED = (1 << 6), ADL_DISPLAY_MODE_REFRESHRATE_ONLY = (1 << 7), }; enum { ADL_DISPLAY_MODE_PROGRESSIVE_FLAG = 0, ADL_DISPLAY_MODE_INTERLACED_FLAG = 2, }; #define ADL_OSMODEINFOXPOS_DEFAULT -640 #define ADL_OSMODEINFOYPOS_DEFAULT 0 #define ADL_OSMODEINFOXRES_DEFAULT 640 #define ADL_OSMODEINFOYRES_DEFAULT 480 #define ADL_OSMODEINFOXRES_DEFAULT800 800 #define ADL_OSMODEINFOYRES_DEFAULT600 600 #define ADL_OSMODEINFOREFRESHRATE_DEFAULT 60 #define ADL_OSMODEINFOCOLOURDEPTH_DEFAULT 8 #define ADL_OSMODEINFOCOLOURDEPTH_DEFAULT16 16 #define ADL_OSMODEINFOCOLOURDEPTH_DEFAULT24 24 #define ADL_OSMODEINFOCOLOURDEPTH_DEFAULT32 32 #define ADL_OSMODEINFOORIENTATION_DEFAULT 0 #define ADL_OSMODEINFOORIENTATION_DEFAULT_WIN7 DISPLAYCONFIG_ROTATION_FORCE_UINT32 #define ADL_OSMODEFLAG_DEFAULT 0 enum ADLPurposeCode { ADL_PURPOSECODE_NORMAL = 0, ADL_PURPOSECODE_HIDE_MODE_SWITCH, ADL_PURPOSECODE_MODE_SWITCH, ADL_PURPOSECODE_ATTATCH_DEVICE, ADL_PURPOSECODE_DETACH_DEVICE, ADL_PURPOSECODE_SETPRIMARY_DEVICE, ADL_PURPOSECODE_GDI_ROTATION, ADL_PURPOSECODE_ATI_ROTATION, }; enum ADLAngle { ADL_ANGLE_LANDSCAPE = 0, ADL_ANGLE_ROTATERIGHT = 90, ADL_ANGLE_ROTATE180 = 180, ADL_ANGLE_ROTATELEFT = 270, }; enum ADLOrientationDataType { ADL_ORIENTATIONTYPE_OSDATATYPE, ADL_ORIENTATIONTYPE_NONOSDATATYPE }; enum ADLPanningMode { ADL_PANNINGMODE_NO_PANNING = 0, ADL_PANNINGMODE_AT_LEAST_ONE_NO_PANNING = 1, ADL_PANNINGMODE_ALLOW_PANNING = 2, }; enum ADLLARGEDESKTOPTYPE { ADL_LARGEDESKTOPTYPE_NORMALDESKTOP = 0, ADL_LARGEDESKTOPTYPE_PSEUDOLARGEDESKTOP = 1, ADL_LARGEDESKTOPTYPE_VERYLARGEDESKTOP = 2, }; #define ADL_I2C_MAJOR_API_REV 1 #define ADL_I2C_MINOR_DEFAULT_API_REV 0 #define ADL_I2C_MINOR_OEM_API_REV 1 enum { ADL_DL_I2C_LINE_OEM = 1, ADL_DL_I2C_LINE_OD_CONTROL = 2, ADL_DL_I2C_LINE_OEM2 = 3, }; #define ADL_DL_I2C_MAXDATASIZE 0x00000040 #define ADL_DL_I2C_MAXWRITEDATASIZE 0x0000000C #define ADL_DL_I2C_MAXADDRESSLENGTH 0x00000006 #define ADL_DL_I2C_MAXOFFSETLENGTH 0x00000004 enum { ADL_DL_DISPLAYPROPERTY_TYPE_UNKNOWN = 0, ADL_DL_DISPLAYPROPERTY_TYPE_EXPANSIONMODE = 1, ADL_DL_DISPLAYPROPERTY_TYPE_USEUNDERSCANSCALING = 2, }; enum { ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_CENTER = 0, ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_FULLSCREEN = 1, ADL_DL_DISPLAYPROPERTY_EXPANSIONMODE_ASPECTRATIO = 2, }; enum { ADL_DL_DISPLAY_DITHER_UNKNOWN = 0, ADL_DL_DISPLAY_DITHER_DISABLED = 1, ADL_DL_DISPLAY_DITHER_ENABLED = 2, }; #define ADL_MAX_EDIDDATA_SIZE 256 #define ADL_MAX_EDID_EXTENSION_BLOCKS 3 enum { ADL_DL_CONTROLLER_OVERLAY_ALPHA = 0, 
ADL_DL_CONTROLLER_OVERLAY_ALPHAPERPIX = 1, }; enum { ADL_DL_DISPLAY_DATA_PACKET__INFO_PACKET_RESET = 0, ADL_DL_DISPLAY_DATA_PACKET__INFO_PACKET_SET = 1, }; enum { ADL_DL_DISPLAY_DATA_PACKET__TYPE__AVI = (1 << 0), ADL_DL_DISPLAY_DATA_PACKET__TYPE__RESERVED = (1 << 1), ADL_DL_DISPLAY_DATA_PACKET__TYPE__VENDORINFO = (1 << 2), }; enum { ADL_GAMUT_MATRIX_SD = 1, ADL_GAMUT_MATRIX_HD = 2, }; enum { ADL_DL_CLOCKINFO_FLAG_FULLSCREEN3DONLY = (1 << 0), ADL_DL_CLOCKINFO_FLAG_ALWAYSFULLSCREEN3D = (1 << 1), ADL_DL_CLOCKINFO_FLAG_VPURECOVERYREDUCED = (1 << 2), ADL_DL_CLOCKINFO_FLAG_THERMALPROTECTION = (1 << 3), }; enum { ADL_DL_POWERXPRESS_GPU_INTEGRATED = 1, ADL_DL_POWERXPRESS_GPU_DISCRETE = 2, }; enum { ADL_DL_POWERXPRESS_SWITCH_RESULT_STARTED = 1, ADL_DL_POWERXPRESS_SWITCH_RESULT_DECLINED = 2, ADL_DL_POWERXPRESS_SWITCH_RESULT_ALREADY = 3, }; #define ADL_DL_POWERXPRESS_VERSION_MAJOR 2 #define ADL_DL_POWERXPRESS_VERSION_MINOR 0 #define ADL_DL_POWERXPRESS_VERSION (((ADL_DL_POWERXPRESS_VERSION_MAJOR) << 16) | ADL_DL_POWERXPRESS_VERSION_MINOR) enum { ADL_DL_THERMAL_DOMAIN_OTHER = 0, ADL_DL_THERMAL_DOMAIN_GPU = 1, }; enum { ADL_DL_THERMAL_FLAG_INTERRUPT = 1, ADL_DL_THERMAL_FLAG_FANCONTROL = 2, }; enum { ADL_DL_FANCTRL_SUPPORTS_PERCENT_READ = (1 << 0), ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE = (1 << 1), ADL_DL_FANCTRL_SUPPORTS_RPM_READ = (1 << 2), ADL_DL_FANCTRL_SUPPORTS_RPM_WRITE = (1 << 3), }; enum { ADL_DL_FANCTRL_SPEED_TYPE_PERCENT = 1, ADL_DL_FANCTRL_SPEED_TYPE_RPM = 2, }; enum { ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED = 1, }; #define ADL_DL_MAX_MVPU_ADAPTERS 4 enum { MVPU_ADAPTER_0 = (1 << 0), MVPU_ADAPTER_1 = (1 << 1), MVPU_ADAPTER_2 = (1 << 2), MVPU_ADAPTER_3 = (1 << 3), }; #define ADL_DL_MAX_REGISTRY_PATH 256 enum { ADL_DL_MVPU_STATUS_OFF = 0, ADL_DL_MVPU_STATUS_ON = 1, }; enum { ADL_ASIC_UNDEFINED = 0, ADL_ASIC_DISCRETE = (1 << 0), ADL_ASIC_INTEGRATED = (1 << 1), ADL_ASIC_FIREGL = (1 << 2), ADL_ASIC_FIREMV = (1 << 3), ADL_ASIC_XGP = (1 << 4), ADL_ASIC_FUSION = (1 << 5), }; enum { ADL_DL_TIMINGFLAG_DOUBLE_SCAN = (1 << 0), ADL_DL_TIMINGFLAG_INTERLACED = (1 << 1), ADL_DL_TIMINGFLAG_H_SYNC_POLARITY = (1 << 2), ADL_DL_TIMINGFLAG_V_SYNC_POLARITY = (1 << 3), }; enum { ADL_DL_MODETIMING_STANDARD_CVT = (1 << 0), ADL_DL_MODETIMING_STANDARD_GTF = (1 << 1), ADL_DL_MODETIMING_STANDARD_DMT = (1 << 2), ADL_DL_MODETIMING_STANDARD_CUSTOM = (1 << 3), ADL_DL_MODETIMING_STANDARD_DRIVER_DEFAULT = (1 << 4), }; enum { ADL_XSERVERINFO_XINERAMAACTIVE = (1 << 0), ADL_XSERVERINFO_RANDR12SUPPORTED = (1 << 1), }; enum { ADL_CONTROLLERVECTOR_0 = 1, ADL_CONTROLLERVECTOR_1 = 2, }; enum { ADL_DISPLAY_SLSGRID_ORIENTATION_000 = (1 << 0), ADL_DISPLAY_SLSGRID_ORIENTATION_090 = (1 << 1), ADL_DISPLAY_SLSGRID_ORIENTATION_180 = (1 << 2), ADL_DISPLAY_SLSGRID_ORIENTATION_270 = (1 << 3), }; enum { ADL_DISPLAY_SLSGRID_CAP_OPTION_RELATIVETO_LANDSCAPE = (1 << 0), ADL_DISPLAY_SLSGRID_CAP_OPTION_RELATIVETO_CURRENTANGLE = (1 << 1), ADL_DISPLAY_SLSGRID_PORTAIT_MODE = (1 << 2), }; enum { ADL_DISPLAY_SLSMAPCONFIG_GET_OPTION_RELATIVETO_LANDSCAPE = 1, ADL_DISPLAY_SLSMAPCONFIG_GET_OPTION_RELATIVETO_CURRENTANGLE = 2, }; enum { ADL_DISPLAY_SLSMAPCONFIG_CREATE_OPTION_RELATIVETO_LANDSCAPE = 1, ADL_DISPLAY_SLSMAPCONFIG_CREATE_OPTION_RELATIVETO_CURRENTANGLE = 2, }; enum { ADL_DISPLAY_SLSMAPCONFIG_REARRANGE_OPTION_RELATIVETO_LANDSCAPE = 1, ADL_DISPLAY_SLSMAPCONFIG_REARRANGE_OPTION_RELATIVETO_CURRENTANGLE = 2, }; enum { ADL_DISPLAY_SLSGRID_RELATIVETO_LANDSCAPE = 0x10, ADL_DISPLAY_SLSGRID_RELATIVETO_CURRENTANGLE = 0x20, }; enum { ADL_DISPLAY_SLSMAP_BEZELMODE = (1 
<< 4), ADL_DISPLAY_SLSMAP_DISPLAYARRANGED = (1 << 1), ADL_DISPLAY_SLSMAP_CURRENTCONFIG = (1 << 2), }; enum { ADL_DISPLAY_SLSMAPINDEXLIST_OPTION_ACTIVE = 1, }; enum { ADL_DISPLAY_BEZELOFFSET_STEPBYSTEPSET = (1 << 2), ADL_DISPLAY_BEZELOFFSET_COMMIT = (1 << 3), }; #endif /* ADL_DEFINES_H_ */ bfgminer-bfgminer-3.10.0/ADL/adl_sdk.h000066400000000000000000000025141226556647300173470ustar00rootroot00000000000000/* The statements-of-fact provided herein are intended to be compatible with * AMD ADL's library. AMD is the creator and copyright holder of the ADL * library this interface describes, and therefore also defined this interface * originally. * These free interfaces were created by Luke Dashjr * As interfaces/APIs cannot be copyrighted, there is no license needed in the * USA and probably many other jurisdictions. * If your jurisdiction rules otherwise, the header is offered by Luke Dashjr * under the MIT license, but you are responsible for determining who your * jurisdiction considers to be the copyright holder in such a case. * * THE INFORMATION IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE INFORMATION OR THE USE OR OTHER DEALINGS IN THE * INFORMATION. */ #ifndef ADL_SDK_H_ #define ADL_SDK_H_ #include "adl_structures.h" typedef void*( #ifdef __stdcall __stdcall #endif *ADL_MAIN_MALLOC_CALLBACK)(int); #endif /* ADL_SDK_H_ */ bfgminer-bfgminer-3.10.0/ADL/adl_structures.h000066400000000000000000000322571226556647300210200ustar00rootroot00000000000000/* The statements-of-fact provided herein are intended to be compatible with * AMD ADL's library. AMD is the creator and copyright holder of the ADL * library this interface describes, and therefore also defined this interface * originally. * These free interfaces were created by Luke Dashjr * As interfaces/APIs cannot be copyrighted, there is no license needed in the * USA and probably many other jurisdictions. * If your jurisdiction rules otherwise, the header is offered by Luke Dashjr * under the MIT license, but you are responsible for determining who your * jurisdiction considers to be the copyright holder in such a case. * * THE INFORMATION IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE INFORMATION OR THE USE OR OTHER DEALINGS IN THE * INFORMATION. 
*/ #ifndef ADL_STRUCTURES_H_ #define ADL_STRUCTURES_H_ #include "adl_defines.h" typedef struct AdapterInfo { int iSize; int iAdapterIndex; char strUDID[ADL_MAX_PATH]; int iBusNumber; int iDeviceNumber; int iFunctionNumber; int iVendorID; char strAdapterName[ADL_MAX_PATH]; char strDisplayName[ADL_MAX_PATH]; int iPresent; #ifdef WIN32 int iExist; char strDriverPath[ADL_MAX_PATH]; char strDriverPathExt[ADL_MAX_PATH]; char strPNPString[ADL_MAX_PATH]; int iOSDisplayIndex; #elif !defined(__APPLE__) /* Linux */ int iXScreenNum; int iDrvIndex; char strXScreenConfigName[ADL_MAX_PATH]; #endif /* Linux */ } AdapterInfo, *LPAdapterInfo; #if !(defined(WIN32) || defined(__APPLE__)) typedef struct XScreenInfo { int iXScreenNum; char strXScreenConfigName[ADL_MAX_PATH]; } XScreenInfo, *LPXScreenInfo; #endif /* Linux */ typedef struct ADLMemoryInfo { long long iMemorySize; char strMemoryType[ADL_MAX_PATH]; long long iMemoryBandwidth; } ADLMemoryInfo, *LPADLMemoryInfo; typedef struct ADLDDCInfo { int ulSize; int ulSupportsDDC; int ulManufacturerID; int ulProductID; char cDisplayName[ADL_MAX_DISPLAY_NAME]; int ulMaxHResolution; int ulMaxVResolution; int ulMaxRefresh; int ulPTMCx; int ulPTMCy; int ulPTMRefreshRate; int ulDDCInfoFlag; } ADLDDCInfo, *LPADLDDCInfo; typedef struct ADLGamma { float fRed; float fGreen; float fBlue; } ADLGamma, *LPADLGamma; typedef struct ADLCustomMode { int iFlags; int iModeWidth; int iModeHeight; int iBaseModeWidth; int iBaseModeHeight; int iRefreshRate; } ADLCustomMode, *LPADLCustomMode; typedef struct ADLGetClocksOUT { long ulHighCoreClock; long ulHighMemoryClock; long ulHighVddc; long ulCoreMin; long ulCoreMax; long ulMemoryMin; long ulMemoryMax; long ulActivityPercent; long ulCurrentCoreClock; long ulCurrentMemoryClock; long ulReserved; } ADLGetClocksOUT; typedef struct ADLDisplayConfig { long ulSize; long ulConnectorType; long ulDeviceData; long ulOverridedDeviceData; long ulReserved; } ADLDisplayConfig; typedef struct ADLDisplayID { int iDisplayLogicalIndex; int iDisplayPhysicalIndex; int iDisplayLogicalAdapterIndex; int iDisplayPhysicalAdapterIndex; } ADLDisplayID, *LPADLDisplayID; typedef struct ADLDisplayInfo { ADLDisplayID displayID; int iDisplayControllerIndex; char strDisplayName[ADL_MAX_PATH]; char strDisplayManufacturerName[ADL_MAX_PATH]; int iDisplayType; int iDisplayOutputType; int iDisplayConnector; int iDisplayInfoMask; int iDisplayInfoValue; } ADLDisplayInfo, *LPADLDisplayInfo; typedef struct ADLDisplayMode { int iPelsHeight; int iPelsWidth; int iBitsPerPel; int iDisplayFrequency; } ADLDisplayMode; typedef struct ADLDetailedTiming { int iSize; short sTimingFlags; short sHTotal; short sHDisplay; short sHSyncStart; short sHSyncWidth; short sVTotal; short sVDisplay; short sVSyncStart; short sVSyncWidth; short sPixelClock; short sHOverscanRight; short sHOverscanLeft; short sVOverscanBottom; short sVOverscanTop; short sOverscan8B; short sOverscanGR; } ADLDetailedTiming; typedef struct ADLDisplayModeInfo { int iTimingStandard; int iPossibleStandard; int iRefreshRate; int iPelsWidth; int iPelsHeight; ADLDetailedTiming sDetailedTiming; } ADLDisplayModeInfo; typedef struct ADLDisplayProperty { int iSize; int iPropertyType; int iExpansionMode; int iSupport; int iCurrent; int iDefault; } ADLDisplayProperty; typedef struct ADLClockInfo { int iCoreClock; int iMemoryClock; } ADLClockInfo, *LPADLClockInfo; typedef struct ADLI2C { int iSize; int iLine; int iAddress; int iOffset; int iAction; int iSpeed; int iDataSize; char *pcData; } ADLI2C; typedef struct ADLDisplayEDIDData { 
int iSize; int iFlag; int iEDIDSize; int iBlockIndex; char cEDIDData[ADL_MAX_EDIDDATA_SIZE]; int iReserved[4]; } ADLDisplayEDIDData; typedef struct ADLControllerOverlayInput { int iSize; int iOverlayAdjust; int iValue; int iReserved; } ADLControllerOverlayInput; typedef struct ADLAdjustmentinfo { int iDefault; int iMin; int iMax; int iStep; } ADLAdjustmentinfo; typedef struct ADLControllerOverlayInfo { int iSize; ADLAdjustmentinfo sOverlayInfo; int iReserved[3]; } ADLControllerOverlayInfo; typedef struct ADLGLSyncModuleID { int iModuleID; int iGlSyncGPUPort; int iFWBootSectorVersion; int iFWUserSectorVersion; } ADLGLSyncModuleID , *LPADLGLSyncModuleID; typedef struct ADLGLSyncPortCaps { int iPortType; int iNumOfLEDs; } ADLGLSyncPortCaps, *LPADLGLSyncPortCaps; typedef struct ADLGLSyncGenlockConfig { int iValidMask; int iSyncDelay; int iFramelockCntlVector; int iSignalSource; int iSampleRate; int iSyncField; int iTriggerEdge; int iScanRateCoeff; } ADLGLSyncGenlockConfig, *LPADLGLSyncGenlockConfig; typedef struct ADLGlSyncPortInfo { int iPortType; int iNumOfLEDs; int iPortState; int iFrequency; int iSignalType; int iSignalSource; } ADLGlSyncPortInfo, *LPADLGlSyncPortInfo; typedef struct ADLGlSyncPortControl { int iPortType; int iControlVector; int iSignalSource; } ADLGlSyncPortControl; typedef struct ADLGlSyncMode { int iControlVector; int iStatusVector; int iGLSyncConnectorIndex; } ADLGlSyncMode, *LPADLGlSyncMode; typedef struct ADLGlSyncMode2 { int iControlVector; int iStatusVector; int iGLSyncConnectorIndex; int iDisplayIndex; } ADLGlSyncMode2, *LPADLGlSyncMode2; typedef struct ADLInfoPacket { char hb0; char hb1; char hb2; char sb[28]; } ADLInfoPacket; typedef struct ADLAVIInfoPacket { char bPB3_ITC; char bPB5; } ADLAVIInfoPacket; typedef struct ADLODClockSetting { int iDefaultClock; int iCurrentClock; int iMaxClock; int iMinClock; int iRequestedClock; int iStepClock; } ADLODClockSetting; typedef struct ADLAdapterODClockInfo { int iSize; int iFlags; ADLODClockSetting sMemoryClock; ADLODClockSetting sEngineClock; } ADLAdapterODClockInfo; typedef struct ADLAdapterODClockConfig { int iSize; int iFlags; int iMemoryClock; int iEngineClock; } ADLAdapterODClockConfig; typedef struct ADLPMActivity { int iSize; int iEngineClock; int iMemoryClock; int iVddc; int iActivityPercent; int iCurrentPerformanceLevel; int iCurrentBusSpeed; int iCurrentBusLanes; int iMaximumBusLanes; int iReserved; } ADLPMActivity; typedef struct ADLThermalControllerInfo { int iSize; int iThermalDomain; int iDomainIndex; int iFlags; } ADLThermalControllerInfo; typedef struct ADLTemperature { int iSize; int iTemperature; } ADLTemperature; typedef struct ADLFanSpeedInfo { int iSize; int iFlags; int iMinPercent; int iMaxPercent; int iMinRPM; int iMaxRPM; } ADLFanSpeedInfo; typedef struct ADLFanSpeedValue { int iSize; int iSpeedType; int iFanSpeed; int iFlags; } ADLFanSpeedValue; typedef struct ADLODParameterRange { int iMin; int iMax; int iStep; } ADLODParameterRange; typedef struct ADLODParameters { int iSize; int iNumberOfPerformanceLevels; int iActivityReportingSupported; int iDiscretePerformanceLevels; int iReserved; ADLODParameterRange sEngineClock; ADLODParameterRange sMemoryClock; ADLODParameterRange sVddc; } ADLODParameters; typedef struct ADLODPerformanceLevel { int iEngineClock; int iMemoryClock; int iVddc; } ADLODPerformanceLevel; typedef struct ADLODPerformanceLevels { int iSize; int iReserved; ADLODPerformanceLevel aLevels[1]; } ADLODPerformanceLevels; typedef struct ADLCrossfireComb { int iNumLinkAdapter; int 
iAdaptLink[3]; } ADLCrossfireComb; typedef struct ADLCrossfireInfo { int iErrorCode; int iState; int iSupported; } ADLCrossfireInfo; typedef struct ADLBiosInfo { char strPartNumber[ADL_MAX_PATH]; char strVersion[ADL_MAX_PATH]; char strDate[ADL_MAX_PATH]; } ADLBiosInfo, *LPADLBiosInfo; typedef struct ADLAdapterLocation { int iBus; int iDevice; int iFunction; } ADLAdapterLocation; typedef struct ADLMVPUCaps { int iSize; int iAdapterCount; int iPossibleMVPUMasters; int iPossibleMVPUSlaves; char cAdapterPath[ADL_DL_MAX_MVPU_ADAPTERS][ADL_DL_MAX_REGISTRY_PATH]; } ADLMVPUCaps; typedef struct ADLMVPUStatus { int iSize; int iActiveAdapterCount; int iStatus; ADLAdapterLocation aAdapterLocation[ADL_DL_MAX_MVPU_ADAPTERS]; } ADLMVPUStatus; typedef struct ADLActivatableSource { int iAdapterIndex; int iNumActivatableSources; int iActivatableSourceMask; int iActivatableSourceValue; } ADLActivatableSource, *LPADLActivatableSource; typedef struct ADLMode { int iAdapterIndex; ADLDisplayID displayID; int iXPos; int iYPos; int iXRes; int iYRes; int iColourDepth; float fRefreshRate; int iOrientation; int iModeFlag; int iModeMask; int iModeValue; } ADLMode, *LPADLMode; typedef struct ADLDisplayTarget { ADLDisplayID displayID; int iDisplayMapIndex; int iDisplayTargetMask; int iDisplayTargetValue; } ADLDisplayTarget, *LPADLDisplayTarget; typedef struct tagADLBezelTransientMode { int iAdapterIndex; int iSLSMapIndex; int iSLSModeIndex; ADLMode displayMode; int iNumBezelOffset; int iFirstBezelOffsetArrayIndex; int iSLSBezelTransientModeMask; int iSLSBezelTransientModeValue; } ADLBezelTransientMode, *LPADLBezelTransientMode; typedef struct ADLAdapterDisplayCap { int iAdapterIndex; int iAdapterDisplayCapMask; int iAdapterDisplayCapValue; } ADLAdapterDisplayCap, *LPADLAdapterDisplayCap; typedef struct ADLDisplayMap { int iDisplayMapIndex; ADLMode displayMode; int iNumDisplayTarget; int iFirstDisplayTargetArrayIndex; int iDisplayMapMask; int iDisplayMapValue; } ADLDisplayMap, *LPADLDisplayMap; typedef struct ADLPossibleMap { int iIndex; int iAdapterIndex; int iNumDisplayMap; ADLDisplayMap* displayMap; int iNumDisplayTarget; ADLDisplayTarget* displayTarget; } ADLPossibleMap, *LPADLPossibleMap; typedef struct ADLPossibleMapping { int iDisplayIndex; int iDisplayControllerIndex; int iDisplayMannerSupported; } ADLPossibleMapping, *LPADLPossibleMapping; typedef struct ADLPossibleMapResult { int iIndex; int iPossibleMapResultMask; int iPossibleMapResultValue; } ADLPossibleMapResult, *LPADLPossibleMapResult; typedef struct ADLSLSGrid { int iAdapterIndex; int iSLSGridIndex; int iSLSGridRow; int iSLSGridColumn; int iSLSGridMask; int iSLSGridValue; } ADLSLSGrid, *LPADLSLSGrid; typedef struct ADLSLSMap { int iAdapterIndex; int iSLSMapIndex; ADLSLSGrid grid; int iSurfaceMapIndex; int iOrientation; int iNumSLSTarget; int iFirstSLSTargetArrayIndex; int iNumNativeMode; int iFirstNativeModeArrayIndex; int iNumBezelMode; int iFirstBezelModeArrayIndex; int iNumBezelOffset; int iFirstBezelOffsetArrayIndex; int iSLSMapMask; int iSLSMapValue; } ADLSLSMap, *LPADLSLSMap; typedef struct ADLSLSOffset { int iAdapterIndex; int iSLSMapIndex; ADLDisplayID displayID; int iBezelModeIndex; int iBezelOffsetX; int iBezelOffsetY; int iDisplayWidth; int iDisplayHeight; int iBezelOffsetMask; int iBezelffsetValue; } ADLSLSOffset, *LPADLSLSOffset; typedef struct ADLSLSMode { int iAdapterIndex; int iSLSMapIndex; int iSLSModeIndex; ADLMode displayMode; int iSLSNativeModeMask; int iSLSNativeModeValue; } ADLSLSMode, *LPADLSLSMode; typedef struct ADLPossibleSLSMap 
{ int iSLSMapIndex; int iNumSLSMap; ADLSLSMap* lpSLSMap; int iNumSLSTarget; ADLDisplayTarget* lpDisplayTarget; } ADLPossibleSLSMap, *LPADLPossibleSLSMap; typedef struct ADLSLSTarget { int iAdapterIndex; int iSLSMapIndex; ADLDisplayTarget displayTarget; int iSLSGridPositionX; int iSLSGridPositionY; ADLMode viewSize; int iSLSTargetMask; int iSLSTargetValue; } ADLSLSTarget, *LPADLSLSTarget; typedef struct ADLBezelOffsetSteppingSize { int iAdapterIndex; int iSLSMapIndex; int iBezelOffsetSteppingSizeX; int iBezelOffsetSteppingSizeY; int iBezelOffsetSteppingSizeMask; int iBezelOffsetSteppingSizeValue; } ADLBezelOffsetSteppingSize, *LPADLBezelOffsetSteppingSize; #endif /* ADL_STRUCTURES_H_ */ bfgminer-bfgminer-3.10.0/AUTHORS000066400000000000000000000037251226556647300162520ustar00rootroot00000000000000FPGA/ASIC mining and refactor: Luke Dashjr 1QATWksNFGeUJCWBrN4g6hGM178Lovm7Wh GPU mining and refactor: Con Kolivas 15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ AntMiner driver: Nate Woolls and Lingchao Xu Bitfury GPIO-based drivers: Bitfury and Anatoly Legkodymov Big Picture Mining and TwinFury drivers: Andreas Auer Avalon and Icarus drivers: Xiangfu ZTEX FPGA driver: Nelisky Original CPU mining software: Jeff Garzik RPC API: Andrew Smith 1Jjk2LmktEQKnv8r2cZ9MvLiZwZ9gxabKm SUSE packaging: Christian Berendt Ubuntu packaging: Graeme Humphries Contributors: Jason Hughes Ycros Denis Ahrens blinkier Peter Stuge Paul Sheppard Vladimir Strinski Dmitry Sorokin Jason Snell Mark Crichton Zefir Kurtisi HashBuster team bluemurder Philip Kaufmann Rusty Russell Znort 987 Phateus Olivier Gay Glenn Francis Murray fleger pooler Ricardo Iván Vieitez Parra gluk Paul Wouters Abracadabra Josh Lehan pontus Tydus Raulo Thorsten Gilling Isidoro Ghezzi capa66 Red_Wolf_2 Mr O bfgminer-bfgminer-3.10.0/CL/000077500000000000000000000000001226556647300154715ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/CL/cl.h000066400000000000000000001240051226556647300162420ustar00rootroot00000000000000/******************************************************************************* * Copyright (c) 2008-2010 The Khronos Group Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and/or associated documentation files (the * "Materials"), to deal in the Materials without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Materials, and to * permit persons to whom the Materials are furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Materials. * * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. 
******************************************************************************/ /* $Revision: 11707 $ on $Date: 2010-06-13 23:30:16 -0700 (Sun, 13 Jun 2010) $ */ #ifndef __OPENCL_CL_H #define __OPENCL_CL_H #include "cl_platform.h" #ifdef __cplusplus extern "C" { #endif /******************************************************************************/ typedef struct _cl_platform_id * cl_platform_id; typedef struct _cl_device_id * cl_device_id; typedef struct _cl_context * cl_context; typedef struct _cl_command_queue * cl_command_queue; typedef struct _cl_mem * cl_mem; typedef struct _cl_program * cl_program; typedef struct _cl_kernel * cl_kernel; typedef struct _cl_event * cl_event; typedef struct _cl_sampler * cl_sampler; typedef cl_uint cl_bool; /* WARNING! Unlike cl_ types in cl_platform.h, cl_bool is not guaranteed to be the same size as the bool in kernels. */ typedef cl_ulong cl_bitfield; typedef cl_bitfield cl_device_type; typedef cl_uint cl_platform_info; typedef cl_uint cl_device_info; typedef cl_bitfield cl_device_address_info; typedef cl_bitfield cl_device_fp_config; typedef cl_uint cl_device_mem_cache_type; typedef cl_uint cl_device_local_mem_type; typedef cl_bitfield cl_device_exec_capabilities; typedef cl_bitfield cl_command_queue_properties; typedef intptr_t cl_context_properties; typedef cl_uint cl_context_info; typedef cl_uint cl_command_queue_info; typedef cl_uint cl_channel_order; typedef cl_uint cl_channel_type; typedef cl_bitfield cl_mem_flags; typedef cl_uint cl_mem_object_type; typedef cl_uint cl_mem_info; typedef cl_uint cl_image_info; typedef cl_uint cl_addressing_mode; typedef cl_uint cl_filter_mode; typedef cl_uint cl_sampler_info; typedef cl_bitfield cl_map_flags; typedef cl_uint cl_program_info; typedef cl_uint cl_program_build_info; typedef cl_int cl_build_status; typedef cl_uint cl_kernel_info; typedef cl_uint cl_kernel_work_group_info; typedef cl_uint cl_event_info; typedef cl_uint cl_command_type; typedef cl_uint cl_profiling_info; typedef struct _cl_image_format { cl_channel_order image_channel_order; cl_channel_type image_channel_data_type; } cl_image_format; /******************************************************************************/ /* Error Codes */ #define CL_SUCCESS 0 #define CL_DEVICE_NOT_FOUND -1 #define CL_DEVICE_NOT_AVAILABLE -2 #define CL_COMPILER_NOT_AVAILABLE -3 #define CL_MEM_OBJECT_ALLOCATION_FAILURE -4 #define CL_OUT_OF_RESOURCES -5 #define CL_OUT_OF_HOST_MEMORY -6 #define CL_PROFILING_INFO_NOT_AVAILABLE -7 #define CL_MEM_COPY_OVERLAP -8 #define CL_IMAGE_FORMAT_MISMATCH -9 #define CL_IMAGE_FORMAT_NOT_SUPPORTED -10 #define CL_BUILD_PROGRAM_FAILURE -11 #define CL_MAP_FAILURE -12 #define CL_INVALID_VALUE -30 #define CL_INVALID_DEVICE_TYPE -31 #define CL_INVALID_PLATFORM -32 #define CL_INVALID_DEVICE -33 #define CL_INVALID_CONTEXT -34 #define CL_INVALID_QUEUE_PROPERTIES -35 #define CL_INVALID_COMMAND_QUEUE -36 #define CL_INVALID_HOST_PTR -37 #define CL_INVALID_MEM_OBJECT -38 #define CL_INVALID_IMAGE_FORMAT_DESCRIPTOR -39 #define CL_INVALID_IMAGE_SIZE -40 #define CL_INVALID_SAMPLER -41 #define CL_INVALID_BINARY -42 #define CL_INVALID_BUILD_OPTIONS -43 #define CL_INVALID_PROGRAM -44 #define CL_INVALID_PROGRAM_EXECUTABLE -45 #define CL_INVALID_KERNEL_NAME -46 #define CL_INVALID_KERNEL_DEFINITION -47 #define CL_INVALID_KERNEL -48 #define CL_INVALID_ARG_INDEX -49 #define CL_INVALID_ARG_VALUE -50 #define CL_INVALID_ARG_SIZE -51 #define CL_INVALID_KERNEL_ARGS -52 #define CL_INVALID_WORK_DIMENSION -53 #define CL_INVALID_WORK_GROUP_SIZE -54 #define 
CL_INVALID_WORK_ITEM_SIZE -55 #define CL_INVALID_GLOBAL_OFFSET -56 #define CL_INVALID_EVENT_WAIT_LIST -57 #define CL_INVALID_EVENT -58 #define CL_INVALID_OPERATION -59 #define CL_INVALID_GL_OBJECT -60 #define CL_INVALID_BUFFER_SIZE -61 #define CL_INVALID_MIP_LEVEL -62 #define CL_INVALID_GLOBAL_WORK_SIZE -63 /* OpenCL Version */ #define CL_VERSION_1_0 1 /* cl_bool */ #define CL_FALSE 0 #define CL_TRUE 1 /* cl_platform_info */ #define CL_PLATFORM_PROFILE 0x0900 #define CL_PLATFORM_VERSION 0x0901 #define CL_PLATFORM_NAME 0x0902 #define CL_PLATFORM_VENDOR 0x0903 #define CL_PLATFORM_EXTENSIONS 0x0904 /* cl_device_type - bitfield */ #define CL_DEVICE_TYPE_DEFAULT (1 << 0) #define CL_DEVICE_TYPE_CPU (1 << 1) #define CL_DEVICE_TYPE_GPU (1 << 2) #define CL_DEVICE_TYPE_ACCELERATOR (1 << 3) #define CL_DEVICE_TYPE_ALL 0xFFFFFFFF /* cl_device_info */ #define CL_DEVICE_TYPE 0x1000 #define CL_DEVICE_VENDOR_ID 0x1001 #define CL_DEVICE_MAX_COMPUTE_UNITS 0x1002 #define CL_DEVICE_MAX_WORK_ITEM_DIMENSIONS 0x1003 #define CL_DEVICE_MAX_WORK_GROUP_SIZE 0x1004 #define CL_DEVICE_MAX_WORK_ITEM_SIZES 0x1005 #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_CHAR 0x1006 #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_SHORT 0x1007 #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT 0x1008 #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_LONG 0x1009 #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_FLOAT 0x100A #define CL_DEVICE_PREFERRED_VECTOR_WIDTH_DOUBLE 0x100B #define CL_DEVICE_MAX_CLOCK_FREQUENCY 0x100C #define CL_DEVICE_ADDRESS_BITS 0x100D #define CL_DEVICE_MAX_READ_IMAGE_ARGS 0x100E #define CL_DEVICE_MAX_WRITE_IMAGE_ARGS 0x100F #define CL_DEVICE_MAX_MEM_ALLOC_SIZE 0x1010 #define CL_DEVICE_IMAGE2D_MAX_WIDTH 0x1011 #define CL_DEVICE_IMAGE2D_MAX_HEIGHT 0x1012 #define CL_DEVICE_IMAGE3D_MAX_WIDTH 0x1013 #define CL_DEVICE_IMAGE3D_MAX_HEIGHT 0x1014 #define CL_DEVICE_IMAGE3D_MAX_DEPTH 0x1015 #define CL_DEVICE_IMAGE_SUPPORT 0x1016 #define CL_DEVICE_MAX_PARAMETER_SIZE 0x1017 #define CL_DEVICE_MAX_SAMPLERS 0x1018 #define CL_DEVICE_MEM_BASE_ADDR_ALIGN 0x1019 #define CL_DEVICE_MIN_DATA_TYPE_ALIGN_SIZE 0x101A #define CL_DEVICE_SINGLE_FP_CONFIG 0x101B #define CL_DEVICE_GLOBAL_MEM_CACHE_TYPE 0x101C #define CL_DEVICE_GLOBAL_MEM_CACHELINE_SIZE 0x101D #define CL_DEVICE_GLOBAL_MEM_CACHE_SIZE 0x101E #define CL_DEVICE_GLOBAL_MEM_SIZE 0x101F #define CL_DEVICE_MAX_CONSTANT_BUFFER_SIZE 0x1020 #define CL_DEVICE_MAX_CONSTANT_ARGS 0x1021 #define CL_DEVICE_LOCAL_MEM_TYPE 0x1022 #define CL_DEVICE_LOCAL_MEM_SIZE 0x1023 #define CL_DEVICE_ERROR_CORRECTION_SUPPORT 0x1024 #define CL_DEVICE_PROFILING_TIMER_RESOLUTION 0x1025 #define CL_DEVICE_ENDIAN_LITTLE 0x1026 #define CL_DEVICE_AVAILABLE 0x1027 #define CL_DEVICE_COMPILER_AVAILABLE 0x1028 #define CL_DEVICE_EXECUTION_CAPABILITIES 0x1029 #define CL_DEVICE_QUEUE_PROPERTIES 0x102A #define CL_DEVICE_NAME 0x102B #define CL_DEVICE_VENDOR 0x102C #define CL_DRIVER_VERSION 0x102D #define CL_DEVICE_PROFILE 0x102E #define CL_DEVICE_VERSION 0x102F #define CL_DEVICE_EXTENSIONS 0x1030 #define CL_DEVICE_PLATFORM 0x1031 /* 0x1032 reserved for CL_DEVICE_DOUBLE_FP_CONFIG */ /* 0x1033 reserved for CL_DEVICE_HALF_FP_CONFIG */ /* cl_device_fp_config - bitfield */ #define CL_FP_DENORM (1 << 0) #define CL_FP_INF_NAN (1 << 1) #define CL_FP_ROUND_TO_NEAREST (1 << 2) #define CL_FP_ROUND_TO_ZERO (1 << 3) #define CL_FP_ROUND_TO_INF (1 << 4) #define CL_FP_FMA (1 << 5) /* cl_device_mem_cache_type */ #define CL_NONE 0x0 #define CL_READ_ONLY_CACHE 0x1 #define CL_READ_WRITE_CACHE 0x2 /* cl_device_local_mem_type */ #define CL_LOCAL 0x1 #define CL_GLOBAL 0x2 /* 
cl_device_exec_capabilities - bitfield */ #define CL_EXEC_KERNEL (1 << 0) #define CL_EXEC_NATIVE_KERNEL (1 << 1) /* cl_command_queue_properties - bitfield */ #define CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE (1 << 0) #define CL_QUEUE_PROFILING_ENABLE (1 << 1) /* cl_context_info */ #define CL_CONTEXT_REFERENCE_COUNT 0x1080 #define CL_CONTEXT_DEVICES 0x1081 #define CL_CONTEXT_PROPERTIES 0x1082 /* cl_context_info + cl_context_properties */ #define CL_CONTEXT_PLATFORM 0x1084 /* cl_command_queue_info */ #define CL_QUEUE_CONTEXT 0x1090 #define CL_QUEUE_DEVICE 0x1091 #define CL_QUEUE_REFERENCE_COUNT 0x1092 #define CL_QUEUE_PROPERTIES 0x1093 /* cl_mem_flags - bitfield */ #define CL_MEM_READ_WRITE (1 << 0) #define CL_MEM_WRITE_ONLY (1 << 1) #define CL_MEM_READ_ONLY (1 << 2) #define CL_MEM_USE_HOST_PTR (1 << 3) #define CL_MEM_ALLOC_HOST_PTR (1 << 4) #define CL_MEM_COPY_HOST_PTR (1 << 5) /* cl_channel_order */ #define CL_R 0x10B0 #define CL_A 0x10B1 #define CL_RG 0x10B2 #define CL_RA 0x10B3 #define CL_RGB 0x10B4 #define CL_RGBA 0x10B5 #define CL_BGRA 0x10B6 #define CL_ARGB 0x10B7 #define CL_INTENSITY 0x10B8 #define CL_LUMINANCE 0x10B9 /* cl_channel_type */ #define CL_SNORM_INT8 0x10D0 #define CL_SNORM_INT16 0x10D1 #define CL_UNORM_INT8 0x10D2 #define CL_UNORM_INT16 0x10D3 #define CL_UNORM_SHORT_565 0x10D4 #define CL_UNORM_SHORT_555 0x10D5 #define CL_UNORM_INT_101010 0x10D6 #define CL_SIGNED_INT8 0x10D7 #define CL_SIGNED_INT16 0x10D8 #define CL_SIGNED_INT32 0x10D9 #define CL_UNSIGNED_INT8 0x10DA #define CL_UNSIGNED_INT16 0x10DB #define CL_UNSIGNED_INT32 0x10DC #define CL_HALF_FLOAT 0x10DD #define CL_FLOAT 0x10DE /* cl_mem_object_type */ #define CL_MEM_OBJECT_BUFFER 0x10F0 #define CL_MEM_OBJECT_IMAGE2D 0x10F1 #define CL_MEM_OBJECT_IMAGE3D 0x10F2 /* cl_mem_info */ #define CL_MEM_TYPE 0x1100 #define CL_MEM_FLAGS 0x1101 #define CL_MEM_SIZE 0x1102 #define CL_MEM_HOST_PTR 0x1103 #define CL_MEM_MAP_COUNT 0x1104 #define CL_MEM_REFERENCE_COUNT 0x1105 #define CL_MEM_CONTEXT 0x1106 /* cl_image_info */ #define CL_IMAGE_FORMAT 0x1110 #define CL_IMAGE_ELEMENT_SIZE 0x1111 #define CL_IMAGE_ROW_PITCH 0x1112 #define CL_IMAGE_SLICE_PITCH 0x1113 #define CL_IMAGE_WIDTH 0x1114 #define CL_IMAGE_HEIGHT 0x1115 #define CL_IMAGE_DEPTH 0x1116 /* cl_addressing_mode */ #define CL_ADDRESS_NONE 0x1130 #define CL_ADDRESS_CLAMP_TO_EDGE 0x1131 #define CL_ADDRESS_CLAMP 0x1132 #define CL_ADDRESS_REPEAT 0x1133 /* cl_filter_mode */ #define CL_FILTER_NEAREST 0x1140 #define CL_FILTER_LINEAR 0x1141 /* cl_sampler_info */ #define CL_SAMPLER_REFERENCE_COUNT 0x1150 #define CL_SAMPLER_CONTEXT 0x1151 #define CL_SAMPLER_NORMALIZED_COORDS 0x1152 #define CL_SAMPLER_ADDRESSING_MODE 0x1153 #define CL_SAMPLER_FILTER_MODE 0x1154 /* cl_map_flags - bitfield */ #define CL_MAP_READ (1 << 0) #define CL_MAP_WRITE (1 << 1) /* cl_program_info */ #define CL_PROGRAM_REFERENCE_COUNT 0x1160 #define CL_PROGRAM_CONTEXT 0x1161 #define CL_PROGRAM_NUM_DEVICES 0x1162 #define CL_PROGRAM_DEVICES 0x1163 #define CL_PROGRAM_SOURCE 0x1164 #define CL_PROGRAM_BINARY_SIZES 0x1165 #define CL_PROGRAM_BINARIES 0x1166 /* cl_program_build_info */ #define CL_PROGRAM_BUILD_STATUS 0x1181 #define CL_PROGRAM_BUILD_OPTIONS 0x1182 #define CL_PROGRAM_BUILD_LOG 0x1183 /* cl_build_status */ #define CL_BUILD_SUCCESS 0 #define CL_BUILD_NONE -1 #define CL_BUILD_ERROR -2 #define CL_BUILD_IN_PROGRESS -3 /* cl_kernel_info */ #define CL_KERNEL_FUNCTION_NAME 0x1190 #define CL_KERNEL_NUM_ARGS 0x1191 #define CL_KERNEL_REFERENCE_COUNT 0x1192 #define CL_KERNEL_CONTEXT 0x1193 #define CL_KERNEL_PROGRAM 0x1194 /* 
cl_kernel_work_group_info */ #define CL_KERNEL_WORK_GROUP_SIZE 0x11B0 #define CL_KERNEL_COMPILE_WORK_GROUP_SIZE 0x11B1 #define CL_KERNEL_LOCAL_MEM_SIZE 0x11B2 /* cl_event_info */ #define CL_EVENT_COMMAND_QUEUE 0x11D0 #define CL_EVENT_COMMAND_TYPE 0x11D1 #define CL_EVENT_REFERENCE_COUNT 0x11D2 #define CL_EVENT_COMMAND_EXECUTION_STATUS 0x11D3 /* cl_command_type */ #define CL_COMMAND_NDRANGE_KERNEL 0x11F0 #define CL_COMMAND_TASK 0x11F1 #define CL_COMMAND_NATIVE_KERNEL 0x11F2 #define CL_COMMAND_READ_BUFFER 0x11F3 #define CL_COMMAND_WRITE_BUFFER 0x11F4 #define CL_COMMAND_COPY_BUFFER 0x11F5 #define CL_COMMAND_READ_IMAGE 0x11F6 #define CL_COMMAND_WRITE_IMAGE 0x11F7 #define CL_COMMAND_COPY_IMAGE 0x11F8 #define CL_COMMAND_COPY_IMAGE_TO_BUFFER 0x11F9 #define CL_COMMAND_COPY_BUFFER_TO_IMAGE 0x11FA #define CL_COMMAND_MAP_BUFFER 0x11FB #define CL_COMMAND_MAP_IMAGE 0x11FC #define CL_COMMAND_UNMAP_MEM_OBJECT 0x11FD #define CL_COMMAND_MARKER 0x11FE #define CL_COMMAND_ACQUIRE_GL_OBJECTS 0x11FF #define CL_COMMAND_RELEASE_GL_OBJECTS 0x1200 /* command execution status */ #define CL_COMPLETE 0x0 #define CL_RUNNING 0x1 #define CL_SUBMITTED 0x2 #define CL_QUEUED 0x3 /* cl_profiling_info */ #define CL_PROFILING_COMMAND_QUEUED 0x1280 #define CL_PROFILING_COMMAND_SUBMIT 0x1281 #define CL_PROFILING_COMMAND_START 0x1282 #define CL_PROFILING_COMMAND_END 0x1283 /********************************************************************************************************/ #ifndef OMIT_OPENCL_API /* Platform API */ extern CL_API_ENTRY cl_int CL_API_CALL clGetPlatformIDs(cl_uint /* num_entries */, cl_platform_id * /* platforms */, cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetPlatformInfo(cl_platform_id /* platform */, cl_platform_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Device APIs */ extern CL_API_ENTRY cl_int CL_API_CALL clGetDeviceIDs(cl_platform_id /* platform */, cl_device_type /* device_type */, cl_uint /* num_entries */, cl_device_id * /* devices */, cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetDeviceInfo(cl_device_id /* device */, cl_device_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Context APIs */ extern CL_API_ENTRY cl_context CL_API_CALL clCreateContext(const cl_context_properties * /* properties */, cl_uint /* num_devices */, const cl_device_id * /* devices */, void (CL_CALLBACK * /* pfn_notify */)(const char *, const void *, size_t, void *), void * /* user_data */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_context CL_API_CALL clCreateContextFromType(const cl_context_properties * /* properties */, cl_device_type /* device_type */, void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *), void * /* user_data */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseContext(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetContextInfo(cl_context /* context */, cl_context_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Command Queue 
APIs */ extern CL_API_ENTRY cl_command_queue CL_API_CALL clCreateCommandQueue(cl_context /* context */, cl_device_id /* device */, cl_command_queue_properties /* properties */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseCommandQueue(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetCommandQueueInfo(cl_command_queue /* command_queue */, cl_command_queue_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clSetCommandQueueProperty(cl_command_queue /* command_queue */, cl_command_queue_properties /* properties */, cl_bool /* enable */, cl_command_queue_properties * /* old_properties */) CL_API_SUFFIX__VERSION_1_0; /* Memory Object APIs */ extern CL_API_ENTRY cl_mem CL_API_CALL clCreateBuffer(cl_context /* context */, cl_mem_flags /* flags */, size_t /* size */, void * /* host_ptr */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_mem CL_API_CALL clCreateImage2D(cl_context /* context */, cl_mem_flags /* flags */, const cl_image_format * /* image_format */, size_t /* image_width */, size_t /* image_height */, size_t /* image_row_pitch */, void * /* host_ptr */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_mem CL_API_CALL clCreateImage3D(cl_context /* context */, cl_mem_flags /* flags */, const cl_image_format * /* image_format */, size_t /* image_width */, size_t /* image_height */, size_t /* image_depth */, size_t /* image_row_pitch */, size_t /* image_slice_pitch */, void * /* host_ptr */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseMemObject(cl_mem /* memobj */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetSupportedImageFormats(cl_context /* context */, cl_mem_flags /* flags */, cl_mem_object_type /* image_type */, cl_uint /* num_entries */, cl_image_format * /* image_formats */, cl_uint * /* num_image_formats */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetMemObjectInfo(cl_mem /* memobj */, cl_mem_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetImageInfo(cl_mem /* image */, cl_image_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Sampler APIs */ extern CL_API_ENTRY cl_sampler CL_API_CALL clCreateSampler(cl_context /* context */, cl_bool /* normalized_coords */, cl_addressing_mode /* addressing_mode */, cl_filter_mode /* filter_mode */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseSampler(cl_sampler /* sampler */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetSamplerInfo(cl_sampler /* sampler */, cl_sampler_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* 
param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Program Object APIs */ extern CL_API_ENTRY cl_program CL_API_CALL clCreateProgramWithSource(cl_context /* context */, cl_uint /* count */, const char ** /* strings */, const size_t * /* lengths */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_program CL_API_CALL clCreateProgramWithBinary(cl_context /* context */, cl_uint /* num_devices */, const cl_device_id * /* device_list */, const size_t * /* lengths */, const unsigned char ** /* binaries */, cl_int * /* binary_status */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseProgram(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clBuildProgram(cl_program /* program */, cl_uint /* num_devices */, const cl_device_id * /* device_list */, const char * /* options */, void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), void * /* user_data */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clUnloadCompiler(void) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetProgramInfo(cl_program /* program */, cl_program_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetProgramBuildInfo(cl_program /* program */, cl_device_id /* device */, cl_program_build_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Kernel Object APIs */ extern CL_API_ENTRY cl_kernel CL_API_CALL clCreateKernel(cl_program /* program */, const char * /* kernel_name */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clCreateKernelsInProgram(cl_program /* program */, cl_uint /* num_kernels */, cl_kernel * /* kernels */, cl_uint * /* num_kernels_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseKernel(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clSetKernelArg(cl_kernel /* kernel */, cl_uint /* arg_index */, size_t /* arg_size */, const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetKernelInfo(cl_kernel /* kernel */, cl_kernel_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetKernelWorkGroupInfo(cl_kernel /* kernel */, cl_device_id /* device */, cl_kernel_work_group_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Event Object APIs */ extern CL_API_ENTRY cl_int CL_API_CALL clWaitForEvents(cl_uint /* num_events */, const cl_event * /* event_list */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clGetEventInfo(cl_event /* event */, cl_event_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clRetainEvent(cl_event /* event */) 
CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clReleaseEvent(cl_event /* event */) CL_API_SUFFIX__VERSION_1_0; /* Profiling APIs */ extern CL_API_ENTRY cl_int CL_API_CALL clGetEventProfilingInfo(cl_event /* event */, cl_profiling_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Flush and Finish APIs */ extern CL_API_ENTRY cl_int CL_API_CALL clFlush(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clFinish(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Enqueued Commands APIs */ extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueReadBuffer(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_read */, size_t /* offset */, size_t /* cb */, void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueWriteBuffer(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_write */, size_t /* offset */, size_t /* cb */, const void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueCopyBuffer(cl_command_queue /* command_queue */, cl_mem /* src_buffer */, cl_mem /* dst_buffer */, size_t /* src_offset */, size_t /* dst_offset */, size_t /* cb */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueReadImage(cl_command_queue /* command_queue */, cl_mem /* image */, cl_bool /* blocking_read */, const size_t * /* origin[3] */, const size_t * /* region[3] */, size_t /* row_pitch */, size_t /* slice_pitch */, void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueWriteImage(cl_command_queue /* command_queue */, cl_mem /* image */, cl_bool /* blocking_write */, const size_t * /* origin[3] */, const size_t * /* region[3] */, size_t /* input_row_pitch */, size_t /* input_slice_pitch */, const void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueCopyImage(cl_command_queue /* command_queue */, cl_mem /* src_image */, cl_mem /* dst_image */, const size_t * /* src_origin[3] */, const size_t * /* dst_origin[3] */, const size_t * /* region[3] */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueCopyImageToBuffer(cl_command_queue /* command_queue */, cl_mem /* src_image */, cl_mem /* dst_buffer */, const size_t * /* src_origin[3] */, const size_t * /* region[3] */, size_t /* dst_offset */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueCopyBufferToImage(cl_command_queue /* command_queue */, cl_mem /* src_buffer */, cl_mem /* dst_image */, size_t /* src_offset */, const size_t * /* dst_origin[3] */, const size_t * /* region[3] */, cl_uint /* 
num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY void * CL_API_CALL clEnqueueMapBuffer(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_map */, cl_map_flags /* map_flags */, size_t /* offset */, size_t /* cb */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY void * CL_API_CALL clEnqueueMapImage(cl_command_queue /* command_queue */, cl_mem /* image */, cl_bool /* blocking_map */, cl_map_flags /* map_flags */, const size_t * /* origin[3] */, const size_t * /* region[3] */, size_t * /* image_row_pitch */, size_t * /* image_slice_pitch */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueUnmapMemObject(cl_command_queue /* command_queue */, cl_mem /* memobj */, void * /* mapped_ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueNDRangeKernel(cl_command_queue /* command_queue */, cl_kernel /* kernel */, cl_uint /* work_dim */, const size_t * /* global_work_offset */, const size_t * /* global_work_size */, const size_t * /* local_work_size */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueTask(cl_command_queue /* command_queue */, cl_kernel /* kernel */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueNativeKernel(cl_command_queue /* command_queue */, void (*user_func)(void *), void * /* args */, size_t /* cb_args */, cl_uint /* num_mem_objects */, const cl_mem * /* mem_list */, const void ** /* args_mem_loc */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueMarker(cl_command_queue /* command_queue */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueWaitForEvents(cl_command_queue /* command_queue */, cl_uint /* num_events */, const cl_event * /* event_list */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL clEnqueueBarrier(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Extension function access * * Returns the extension function address for the given function name, * or NULL if a valid function can not be found. The client must * check to make sure the address is not NULL, before using or * calling the returned function address. */ extern CL_API_ENTRY void * CL_API_CALL clGetExtensionFunctionAddress(const char * /* func_name */) CL_API_SUFFIX__VERSION_1_0; #endif /* OMIT_OPENCL_API */ #ifdef __cplusplus } #endif #endif /* __OPENCL_CL_H */ bfgminer-bfgminer-3.10.0/CL/cl_platform.h000066400000000000000000001020121226556647300201400ustar00rootroot00000000000000/********************************************************************************** * Copyright (c) 2008-2010 The Khronos Group Inc. 
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and/or associated documentation files (the * "Materials"), to deal in the Materials without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Materials, and to * permit persons to whom the Materials are furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Materials. * * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. **********************************************************************************/ /* $Revision: 14830 $ on $Date: 2011-05-26 08:34:31 -0700 (Thu, 26 May 2011) $ */ #ifndef __CL_PLATFORM_H #define __CL_PLATFORM_H #ifdef __APPLE__ /* Contains #defines for AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER below */ #include #endif #ifdef __cplusplus extern "C" { #endif #if defined(_WIN32) #define CL_API_ENTRY #define CL_API_CALL __stdcall #else #define CL_API_ENTRY #define CL_API_CALL #endif #ifdef __APPLE__ #define CL_API_SUFFIX__VERSION_1_0 AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER #define CL_API_SUFFIX__VERSION_1_1 #define CL_EXTENSION_WEAK_LINK __attribute__((weak_import)) #else #define CL_API_SUFFIX__VERSION_1_0 #define CL_API_SUFFIX__VERSION_1_1 #define CL_EXTENSION_WEAK_LINK #endif #if (defined (_WIN32) && defined(_MSC_VER)) /* scalar types */ typedef signed __int8 cl_char; typedef unsigned __int8 cl_uchar; typedef signed __int16 cl_short; typedef unsigned __int16 cl_ushort; typedef signed __int32 cl_int; typedef unsigned __int32 cl_uint; typedef signed __int64 cl_long; typedef unsigned __int64 cl_ulong; typedef unsigned __int16 cl_half; typedef float cl_float; typedef double cl_double; /* Macro names and corresponding values defined by OpenCL */ #define CL_CHAR_BIT 8 #define CL_SCHAR_MAX 127 #define CL_SCHAR_MIN (-127-1) #define CL_CHAR_MAX CL_SCHAR_MAX #define CL_CHAR_MIN CL_SCHAR_MIN #define CL_UCHAR_MAX 255 #define CL_SHRT_MAX 32767 #define CL_SHRT_MIN (-32767-1) #define CL_USHRT_MAX 65535 #define CL_INT_MAX 2147483647 #define CL_INT_MIN (-2147483647-1) #define CL_UINT_MAX 0xffffffffU #define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) #define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) #define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) #define CL_FLT_DIG 6 #define CL_FLT_MANT_DIG 24 #define CL_FLT_MAX_10_EXP +38 #define CL_FLT_MAX_EXP +128 #define CL_FLT_MIN_10_EXP -37 #define CL_FLT_MIN_EXP -125 #define CL_FLT_RADIX 2 #define CL_FLT_MAX 340282346638528859811704183484516925440.0f #define CL_FLT_MIN 1.175494350822287507969e-38f #define CL_FLT_EPSILON 0x1.0p-23f #define CL_DBL_DIG 15 #define CL_DBL_MANT_DIG 53 #define CL_DBL_MAX_10_EXP +308 #define CL_DBL_MAX_EXP +1024 #define CL_DBL_MIN_10_EXP -307 #define CL_DBL_MIN_EXP -1021 #define CL_DBL_RADIX 2 #define CL_DBL_MAX 
179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.0 #define CL_DBL_MIN 2.225073858507201383090e-308 #define CL_DBL_EPSILON 2.220446049250313080847e-16 #define CL_NAN (CL_INFINITY - CL_INFINITY) #define CL_HUGE_VALF ((cl_float) 1e50) #define CL_HUGE_VAL ((cl_double) 1e500) #define CL_MAXFLOAT CL_FLT_MAX #define CL_INFINITY CL_HUGE_VALF #define CL_CALLBACK __stdcall #else #include /* scalar types */ typedef int8_t cl_char; typedef uint8_t cl_uchar; typedef int16_t cl_short __attribute__((aligned(2))); typedef uint16_t cl_ushort __attribute__((aligned(2))); typedef int32_t cl_int __attribute__((aligned(4))); typedef uint32_t cl_uint __attribute__((aligned(4))); typedef int64_t cl_long __attribute__((aligned(8))); typedef uint64_t cl_ulong __attribute__((aligned(8))); typedef uint16_t cl_half __attribute__((aligned(2))); typedef float cl_float __attribute__((aligned(4))); typedef double cl_double __attribute__((aligned(8))); /* Macro names and corresponding values defined by OpenCL */ #define CL_CHAR_BIT 8 #define CL_SCHAR_MAX 127 #define CL_SCHAR_MIN (-127-1) #define CL_CHAR_MAX CL_SCHAR_MAX #define CL_CHAR_MIN CL_SCHAR_MIN #define CL_UCHAR_MAX 255 #define CL_SHRT_MAX 32767 #define CL_SHRT_MIN (-32767-1) #define CL_USHRT_MAX 65535 #define CL_INT_MAX 2147483647 #define CL_INT_MIN (-2147483647-1) #define CL_UINT_MAX 0xffffffffU #define CL_LONG_MAX ((cl_long) 0x7FFFFFFFFFFFFFFFLL) #define CL_LONG_MIN ((cl_long) -0x7FFFFFFFFFFFFFFFLL - 1LL) #define CL_ULONG_MAX ((cl_ulong) 0xFFFFFFFFFFFFFFFFULL) #define CL_FLT_DIG 6 #define CL_FLT_MANT_DIG 24 #define CL_FLT_MAX_10_EXP +38 #define CL_FLT_MAX_EXP +128 #define CL_FLT_MIN_10_EXP -37 #define CL_FLT_MIN_EXP -125 #define CL_FLT_RADIX 2 #define CL_FLT_MAX 0x1.fffffep127f #define CL_FLT_MIN 0x1.0p-126f #define CL_FLT_EPSILON 0x1.0p-23f #define CL_DBL_DIG 15 #define CL_DBL_MANT_DIG 53 #define CL_DBL_MAX_10_EXP +308 #define CL_DBL_MAX_EXP +1024 #define CL_DBL_MIN_10_EXP -307 #define CL_DBL_MIN_EXP -1021 #define CL_DBL_RADIX 2 #define CL_DBL_MAX 0x1.fffffffffffffp1023 #define CL_DBL_MIN 0x1.0p-1022 #define CL_DBL_EPSILON 0x1.0p-52 #if (defined( __GNUC__ ) || defined( __IBMC__ )) #define CL_HUGE_VALF __builtin_huge_valf() #define CL_HUGE_VAL __builtin_huge_val() #define CL_NAN __builtin_nanf( "" ) #else #define CL_HUGE_VALF ((cl_float) 1e50) #define CL_HUGE_VAL ((cl_double) 1e500) float nanf( const char * ); #define CL_NAN nanf( "" ) #endif #define CL_MAXFLOAT CL_FLT_MAX #define CL_INFINITY CL_HUGE_VALF #define CL_CALLBACK #endif #include /* Mirror types to GL types. Mirror types allow us to avoid deciding which headers to load based on whether we are using GL or GLES here. */ typedef unsigned int cl_GLuint; typedef int cl_GLint; typedef unsigned int cl_GLenum; /* * Vector types * * Note: OpenCL requires that all types be naturally aligned. * This means that vector types must be naturally aligned. * For example, a vector of four floats must be aligned to * a 16 byte boundary (calculated as 4 * the natural 4-byte * alignment of the float). The alignment qualifiers here * will only function properly if your compiler supports them * and if you don't actively work to defeat them. 
For example, * in order for a cl_float4 to be 16 byte aligned in a struct, * the start of the struct must itself be 16-byte aligned. * * Maintaining proper alignment is the user's responsibility. */ /* Define basic vector types */ #if defined( __VEC__ ) #include /* may be omitted depending on compiler. AltiVec spec provides no way to detect whether the header is required. */ typedef vector unsigned char __cl_uchar16; typedef vector signed char __cl_char16; typedef vector unsigned short __cl_ushort8; typedef vector signed short __cl_short8; typedef vector unsigned int __cl_uint4; typedef vector signed int __cl_int4; typedef vector float __cl_float4; #define __CL_UCHAR16__ 1 #define __CL_CHAR16__ 1 #define __CL_USHORT8__ 1 #define __CL_SHORT8__ 1 #define __CL_UINT4__ 1 #define __CL_INT4__ 1 #define __CL_FLOAT4__ 1 #endif #if defined( __SSE__ ) #if defined( __MINGW64__ ) #include #else #include #endif #if defined( __GNUC__ ) typedef float __cl_float4 __attribute__((vector_size(16))); #else typedef __m128 __cl_float4; #endif #define __CL_FLOAT4__ 1 #endif #if defined( __SSE2__ ) #if defined( __MINGW64__ ) #include #else #include #endif #if defined( __GNUC__ ) typedef cl_uchar __cl_uchar16 __attribute__((vector_size(16))); typedef cl_char __cl_char16 __attribute__((vector_size(16))); typedef cl_ushort __cl_ushort8 __attribute__((vector_size(16))); typedef cl_short __cl_short8 __attribute__((vector_size(16))); typedef cl_uint __cl_uint4 __attribute__((vector_size(16))); typedef cl_int __cl_int4 __attribute__((vector_size(16))); typedef cl_ulong __cl_ulong2 __attribute__((vector_size(16))); typedef cl_long __cl_long2 __attribute__((vector_size(16))); typedef cl_double __cl_double2 __attribute__((vector_size(16))); #else typedef __m128i __cl_uchar16; typedef __m128i __cl_char16; typedef __m128i __cl_ushort8; typedef __m128i __cl_short8; typedef __m128i __cl_uint4; typedef __m128i __cl_int4; typedef __m128i __cl_ulong2; typedef __m128i __cl_long2; typedef __m128d __cl_double2; #endif #define __CL_UCHAR16__ 1 #define __CL_CHAR16__ 1 #define __CL_USHORT8__ 1 #define __CL_SHORT8__ 1 #define __CL_INT4__ 1 #define __CL_UINT4__ 1 #define __CL_ULONG2__ 1 #define __CL_LONG2__ 1 #define __CL_DOUBLE2__ 1 #endif #if defined( __MMX__ ) #include #if defined( __GNUC__ ) typedef cl_uchar __cl_uchar8 __attribute__((vector_size(8))); typedef cl_char __cl_char8 __attribute__((vector_size(8))); typedef cl_ushort __cl_ushort4 __attribute__((vector_size(8))); typedef cl_short __cl_short4 __attribute__((vector_size(8))); typedef cl_uint __cl_uint2 __attribute__((vector_size(8))); typedef cl_int __cl_int2 __attribute__((vector_size(8))); typedef cl_ulong __cl_ulong1 __attribute__((vector_size(8))); typedef cl_long __cl_long1 __attribute__((vector_size(8))); typedef cl_float __cl_float2 __attribute__((vector_size(8))); #else typedef __m64 __cl_uchar8; typedef __m64 __cl_char8; typedef __m64 __cl_ushort4; typedef __m64 __cl_short4; typedef __m64 __cl_uint2; typedef __m64 __cl_int2; typedef __m64 __cl_ulong1; typedef __m64 __cl_long1; typedef __m64 __cl_float2; #endif #define __CL_UCHAR8__ 1 #define __CL_CHAR8__ 1 #define __CL_USHORT4__ 1 #define __CL_SHORT4__ 1 #define __CL_INT2__ 1 #define __CL_UINT2__ 1 #define __CL_ULONG1__ 1 #define __CL_LONG1__ 1 #define __CL_FLOAT2__ 1 #endif #if defined( __AVX__ ) #if defined( __MINGW64__ ) #include #else #include #endif #if defined( __GNUC__ ) typedef cl_float __cl_float8 __attribute__((vector_size(32))); typedef cl_double __cl_double4 __attribute__((vector_size(32))); #else 
typedef __m256 __cl_float8; typedef __m256d __cl_double4; #endif #define __CL_FLOAT8__ 1 #define __CL_DOUBLE4__ 1 #endif /* Define alignment keys */ #if (defined( __GNUC__ ) || defined( __IBMC__ )) #define CL_ALIGNED(_x) __attribute__ ((aligned(_x))) #elif defined( _WIN32) && (_MSC_VER) /* Alignment keys neutered on windows because MSVC can't swallow function arguments with alignment requirements */ /* http://msdn.microsoft.com/en-us/library/373ak2y1%28VS.71%29.aspx */ /* #include */ /* #define CL_ALIGNED(_x) _CRT_ALIGN(_x) */ #define CL_ALIGNED(_x) #else #warning Need to implement some method to align data here #define CL_ALIGNED(_x) #endif /* Indicate whether .xyzw, .s0123 and .hi.lo are supported */ #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) /* .xyzw and .s0123...{f|F} are supported */ #define CL_HAS_NAMED_VECTOR_FIELDS 1 /* .hi and .lo are supported */ #define CL_HAS_HI_LO_VECTOR_FIELDS 1 #endif /* Define cl_vector types */ /* ---- cl_charn ---- */ typedef union { cl_char CL_ALIGNED(2) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_char x, y; }; __extension__ struct{ cl_char s0, s1; }; __extension__ struct{ cl_char lo, hi; }; #endif #if defined( __CL_CHAR2__) __cl_char2 v2; #endif }cl_char2; typedef union { cl_char CL_ALIGNED(4) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_char x, y, z, w; }; __extension__ struct{ cl_char s0, s1, s2, s3; }; __extension__ struct{ cl_char2 lo, hi; }; #endif #if defined( __CL_CHAR2__) __cl_char2 v2[2]; #endif #if defined( __CL_CHAR4__) __cl_char4 v4; #endif }cl_char4; typedef union { cl_char CL_ALIGNED(8) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_char x, y, z, w; }; __extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_char4 lo, hi; }; #endif #if defined( __CL_CHAR2__) __cl_char2 v2[4]; #endif #if defined( __CL_CHAR4__) __cl_char4 v4[2]; #endif #if defined( __CL_CHAR8__ ) __cl_char8 v8; #endif }cl_char8; typedef union { cl_char CL_ALIGNED(16) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_char x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_char s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_char8 lo, hi; }; #endif #if defined( __CL_CHAR2__) __cl_char2 v2[8]; #endif #if defined( __CL_CHAR4__) __cl_char4 v4[4]; #endif #if defined( __CL_CHAR8__ ) __cl_char8 v8[2]; #endif #if defined( __CL_CHAR16__ ) __cl_char16 v16; #endif }cl_char16; /* ---- cl_ucharn ---- */ typedef union { cl_uchar CL_ALIGNED(2) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uchar x, y; }; __extension__ struct{ cl_uchar s0, s1; }; __extension__ struct{ cl_uchar lo, hi; }; #endif #if defined( __cl_uchar2__) __cl_uchar2 v2; #endif }cl_uchar2; typedef union { cl_uchar CL_ALIGNED(4) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! 
defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uchar x, y, z, w; }; __extension__ struct{ cl_uchar s0, s1, s2, s3; }; __extension__ struct{ cl_uchar2 lo, hi; }; #endif #if defined( __CL_UCHAR2__) __cl_uchar2 v2[2]; #endif #if defined( __CL_UCHAR4__) __cl_uchar4 v4; #endif }cl_uchar4; typedef union { cl_uchar CL_ALIGNED(8) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uchar x, y, z, w; }; __extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_uchar4 lo, hi; }; #endif #if defined( __CL_UCHAR2__) __cl_uchar2 v2[4]; #endif #if defined( __CL_UCHAR4__) __cl_uchar4 v4[2]; #endif #if defined( __CL_UCHAR8__ ) __cl_uchar8 v8; #endif }cl_uchar8; typedef union { cl_uchar CL_ALIGNED(16) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uchar x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_uchar s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_uchar8 lo, hi; }; #endif #if defined( __CL_UCHAR2__) __cl_uchar2 v2[8]; #endif #if defined( __CL_UCHAR4__) __cl_uchar4 v4[4]; #endif #if defined( __CL_UCHAR8__ ) __cl_uchar8 v8[2]; #endif #if defined( __CL_UCHAR16__ ) __cl_uchar16 v16; #endif }cl_uchar16; /* ---- cl_shortn ---- */ typedef union { cl_short CL_ALIGNED(4) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_short x, y; }; __extension__ struct{ cl_short s0, s1; }; __extension__ struct{ cl_short lo, hi; }; #endif #if defined( __CL_SHORT2__) __cl_short2 v2; #endif }cl_short2; typedef union { cl_short CL_ALIGNED(8) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_short x, y, z, w; }; __extension__ struct{ cl_short s0, s1, s2, s3; }; __extension__ struct{ cl_short2 lo, hi; }; #endif #if defined( __CL_SHORT2__) __cl_short2 v2[2]; #endif #if defined( __CL_SHORT4__) __cl_short4 v4; #endif }cl_short4; typedef union { cl_short CL_ALIGNED(16) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_short x, y, z, w; }; __extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_short4 lo, hi; }; #endif #if defined( __CL_SHORT2__) __cl_short2 v2[4]; #endif #if defined( __CL_SHORT4__) __cl_short4 v4[2]; #endif #if defined( __CL_SHORT8__ ) __cl_short8 v8; #endif }cl_short8; typedef union { cl_short CL_ALIGNED(32) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_short x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_short s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_short8 lo, hi; }; #endif #if defined( __CL_SHORT2__) __cl_short2 v2[8]; #endif #if defined( __CL_SHORT4__) __cl_short4 v4[4]; #endif #if defined( __CL_SHORT8__ ) __cl_short8 v8[2]; #endif #if defined( __CL_SHORT16__ ) __cl_short16 v16; #endif }cl_short16; /* ---- cl_ushortn ---- */ typedef union { cl_ushort CL_ALIGNED(4) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! 
defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ushort x, y; }; __extension__ struct{ cl_ushort s0, s1; }; __extension__ struct{ cl_ushort lo, hi; }; #endif #if defined( __CL_USHORT2__) __cl_ushort2 v2; #endif }cl_ushort2; typedef union { cl_ushort CL_ALIGNED(8) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ushort x, y, z, w; }; __extension__ struct{ cl_ushort s0, s1, s2, s3; }; __extension__ struct{ cl_ushort2 lo, hi; }; #endif #if defined( __CL_USHORT2__) __cl_ushort2 v2[2]; #endif #if defined( __CL_USHORT4__) __cl_ushort4 v4; #endif }cl_ushort4; typedef union { cl_ushort CL_ALIGNED(16) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ushort x, y, z, w; }; __extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_ushort4 lo, hi; }; #endif #if defined( __CL_USHORT2__) __cl_ushort2 v2[4]; #endif #if defined( __CL_USHORT4__) __cl_ushort4 v4[2]; #endif #if defined( __CL_USHORT8__ ) __cl_ushort8 v8; #endif }cl_ushort8; typedef union { cl_ushort CL_ALIGNED(32) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ushort x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_ushort s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_ushort8 lo, hi; }; #endif #if defined( __CL_USHORT2__) __cl_ushort2 v2[8]; #endif #if defined( __CL_USHORT4__) __cl_ushort4 v4[4]; #endif #if defined( __CL_USHORT8__ ) __cl_ushort8 v8[2]; #endif #if defined( __CL_USHORT16__ ) __cl_ushort16 v16; #endif }cl_ushort16; /* ---- cl_intn ---- */ typedef union { cl_int CL_ALIGNED(8) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_int x, y; }; __extension__ struct{ cl_int s0, s1; }; __extension__ struct{ cl_int lo, hi; }; #endif #if defined( __CL_INT2__) __cl_int2 v2; #endif }cl_int2; typedef union { cl_int CL_ALIGNED(16) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_int x, y, z, w; }; __extension__ struct{ cl_int s0, s1, s2, s3; }; __extension__ struct{ cl_int2 lo, hi; }; #endif #if defined( __CL_INT2__) __cl_int2 v2[2]; #endif #if defined( __CL_INT4__) __cl_int4 v4; #endif }cl_int4; typedef union { cl_int CL_ALIGNED(32) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_int x, y, z, w; }; __extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_int4 lo, hi; }; #endif #if defined( __CL_INT2__) __cl_int2 v2[4]; #endif #if defined( __CL_INT4__) __cl_int4 v4[2]; #endif #if defined( __CL_INT8__ ) __cl_int8 v8; #endif }cl_int8; typedef union { cl_int CL_ALIGNED(64) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! 
defined( __STRICT_ANSI__ ) __extension__ struct{ cl_int x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_int8 lo, hi; }; #endif #if defined( __CL_INT2__) __cl_int2 v2[8]; #endif #if defined( __CL_INT4__) __cl_int4 v4[4]; #endif #if defined( __CL_INT8__ ) __cl_int8 v8[2]; #endif #if defined( __CL_INT16__ ) __cl_int16 v16; #endif }cl_int16; /* ---- cl_uintn ---- */ typedef union { cl_uint CL_ALIGNED(8) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uint x, y; }; __extension__ struct{ cl_uint s0, s1; }; __extension__ struct{ cl_uint lo, hi; }; #endif #if defined( __CL_UINT2__) __cl_uint2 v2; #endif }cl_uint2; typedef union { cl_uint CL_ALIGNED(16) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uint x, y, z, w; }; __extension__ struct{ cl_uint s0, s1, s2, s3; }; __extension__ struct{ cl_uint2 lo, hi; }; #endif #if defined( __CL_UINT2__) __cl_uint2 v2[2]; #endif #if defined( __CL_UINT4__) __cl_uint4 v4; #endif }cl_uint4; typedef union { cl_uint CL_ALIGNED(32) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uint x, y, z, w; }; __extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_uint4 lo, hi; }; #endif #if defined( __CL_UINT2__) __cl_uint2 v2[4]; #endif #if defined( __CL_UINT4__) __cl_uint4 v4[2]; #endif #if defined( __CL_UINT8__ ) __cl_uint8 v8; #endif }cl_uint8; typedef union { cl_uint CL_ALIGNED(64) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_uint x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_uint s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_uint8 lo, hi; }; #endif #if defined( __CL_UINT2__) __cl_uint2 v2[8]; #endif #if defined( __CL_UINT4__) __cl_uint4 v4[4]; #endif #if defined( __CL_UINT8__ ) __cl_uint8 v8[2]; #endif #if defined( __CL_UINT16__ ) __cl_uint16 v16; #endif }cl_uint16; /* ---- cl_longn ---- */ typedef union { cl_long CL_ALIGNED(16) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_long x, y; }; __extension__ struct{ cl_long s0, s1; }; __extension__ struct{ cl_long lo, hi; }; #endif #if defined( __CL_LONG2__) __cl_long2 v2; #endif }cl_long2; typedef union { cl_long CL_ALIGNED(32) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_long x, y, z, w; }; __extension__ struct{ cl_long s0, s1, s2, s3; }; __extension__ struct{ cl_long2 lo, hi; }; #endif #if defined( __CL_LONG2__) __cl_long2 v2[2]; #endif #if defined( __CL_LONG4__) __cl_long4 v4; #endif }cl_long4; typedef union { cl_long CL_ALIGNED(64) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! 
defined( __STRICT_ANSI__ ) __extension__ struct{ cl_long x, y, z, w; }; __extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_long4 lo, hi; }; #endif #if defined( __CL_LONG2__) __cl_long2 v2[4]; #endif #if defined( __CL_LONG4__) __cl_long4 v4[2]; #endif #if defined( __CL_LONG8__ ) __cl_long8 v8; #endif }cl_long8; typedef union { cl_long CL_ALIGNED(128) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_long x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_long s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_long8 lo, hi; }; #endif #if defined( __CL_LONG2__) __cl_long2 v2[8]; #endif #if defined( __CL_LONG4__) __cl_long4 v4[4]; #endif #if defined( __CL_LONG8__ ) __cl_long8 v8[2]; #endif #if defined( __CL_LONG16__ ) __cl_long16 v16; #endif }cl_long16; /* ---- cl_ulongn ---- */ typedef union { cl_ulong CL_ALIGNED(16) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ulong x, y; }; __extension__ struct{ cl_ulong s0, s1; }; __extension__ struct{ cl_ulong lo, hi; }; #endif #if defined( __CL_ULONG2__) __cl_ulong2 v2; #endif }cl_ulong2; typedef union { cl_ulong CL_ALIGNED(32) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ulong x, y, z, w; }; __extension__ struct{ cl_ulong s0, s1, s2, s3; }; __extension__ struct{ cl_ulong2 lo, hi; }; #endif #if defined( __CL_ULONG2__) __cl_ulong2 v2[2]; #endif #if defined( __CL_ULONG4__) __cl_ulong4 v4; #endif }cl_ulong4; typedef union { cl_ulong CL_ALIGNED(64) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ulong x, y, z, w; }; __extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_ulong4 lo, hi; }; #endif #if defined( __CL_ULONG2__) __cl_ulong2 v2[4]; #endif #if defined( __CL_ULONG4__) __cl_ulong4 v4[2]; #endif #if defined( __CL_ULONG8__ ) __cl_ulong8 v8; #endif }cl_ulong8; typedef union { cl_ulong CL_ALIGNED(128) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_ulong x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_ulong s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_ulong8 lo, hi; }; #endif #if defined( __CL_ULONG2__) __cl_ulong2 v2[8]; #endif #if defined( __CL_ULONG4__) __cl_ulong4 v4[4]; #endif #if defined( __CL_ULONG8__ ) __cl_ulong8 v8[2]; #endif #if defined( __CL_ULONG16__ ) __cl_ulong16 v16; #endif }cl_ulong16; /* --- cl_floatn ---- */ typedef union { cl_float CL_ALIGNED(8) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_float x, y; }; __extension__ struct{ cl_float s0, s1; }; __extension__ struct{ cl_float lo, hi; }; #endif #if defined( __CL_FLOAT2__) __cl_float2 v2; #endif }cl_float2; typedef union { cl_float CL_ALIGNED(16) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! 
defined( __STRICT_ANSI__ ) __extension__ struct{ cl_float x, y, z, w; }; __extension__ struct{ cl_float s0, s1, s2, s3; }; __extension__ struct{ cl_float2 lo, hi; }; #endif #if defined( __CL_FLOAT2__) __cl_float2 v2[2]; #endif #if defined( __CL_FLOAT4__) __cl_float4 v4; #endif }cl_float4; typedef union { cl_float CL_ALIGNED(32) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_float x, y, z, w; }; __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_float4 lo, hi; }; #endif #if defined( __CL_FLOAT2__) __cl_float2 v2[4]; #endif #if defined( __CL_FLOAT4__) __cl_float4 v4[2]; #endif #if defined( __CL_FLOAT8__ ) __cl_float8 v8; #endif }cl_float8; typedef union { cl_float CL_ALIGNED(64) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_float x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_float s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_float8 lo, hi; }; #endif #if defined( __CL_FLOAT2__) __cl_float2 v2[8]; #endif #if defined( __CL_FLOAT4__) __cl_float4 v4[4]; #endif #if defined( __CL_FLOAT8__ ) __cl_float8 v8[2]; #endif #if defined( __CL_FLOAT16__ ) __cl_float16 v16; #endif }cl_float16; /* --- cl_doublen ---- */ typedef union { cl_double CL_ALIGNED(16) s[2]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_double x, y; }; __extension__ struct{ cl_double s0, s1; }; __extension__ struct{ cl_double lo, hi; }; #endif #if defined( __CL_DOUBLE2__) __cl_double2 v2; #endif }cl_double2; typedef union { cl_double CL_ALIGNED(32) s[4]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_double x, y, z, w; }; __extension__ struct{ cl_double s0, s1, s2, s3; }; __extension__ struct{ cl_double2 lo, hi; }; #endif #if defined( __CL_DOUBLE2__) __cl_double2 v2[2]; #endif #if defined( __CL_DOUBLE4__) __cl_double4 v4; #endif }cl_double4; typedef union { cl_double CL_ALIGNED(64) s[8]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_double x, y, z, w; }; __extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7; }; __extension__ struct{ cl_double4 lo, hi; }; #endif #if defined( __CL_DOUBLE2__) __cl_double2 v2[4]; #endif #if defined( __CL_DOUBLE4__) __cl_double4 v4[2]; #endif #if defined( __CL_DOUBLE8__ ) __cl_double8 v8; #endif }cl_double8; typedef union { cl_double CL_ALIGNED(128) s[16]; #if (defined( __GNUC__) || defined( __IBMC__ )) && ! defined( __STRICT_ANSI__ ) __extension__ struct{ cl_double x, y, z, w, __spacer4, __spacer5, __spacer6, __spacer7, __spacer8, __spacer9, sa, sb, sc, sd, se, sf; }; __extension__ struct{ cl_double s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, sA, sB, sC, sD, sE, sF; }; __extension__ struct{ cl_double8 lo, hi; }; #endif #if defined( __CL_DOUBLE2__) __cl_double2 v2[8]; #endif #if defined( __CL_DOUBLE4__) __cl_double4 v4[4]; #endif #if defined( __CL_DOUBLE8__ ) __cl_double8 v8[2]; #endif #if defined( __CL_DOUBLE16__ ) __cl_double16 v16; #endif }cl_double16; #ifdef __cplusplus } #endif #endif /* __CL_PLATFORM_H */ bfgminer-bfgminer-3.10.0/COPYING000066400000000000000000001045131226556647300162320ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. 
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. 
"Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. 
For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. 
If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. 
Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". 
A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. 
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . bfgminer-bfgminer-3.10.0/ChangeLog000066400000000000000000000002401226556647300167410ustar00rootroot00000000000000See git repository ('git log') for full changelog. 
Git repo can be found at: https://github.com/luke-jr/bfgminer The NEWS file contains most of the changelog bfgminer-bfgminer-3.10.0/HACKING000066400000000000000000000157161226556647300161720ustar00rootroot00000000000000Driver API ========== NOTE: This API is subject to change. It is recommended that you submit your driver, even if obscure, to the mainline BFGMiner codebase so that it will be updated when the API changes. BFGMiner defines 3 different units that drivers can use: - "Device" is a logical unit used for mining. It is represented by its first processor's `struct cgpu_info`. Example: ButterFly Labs MiniRig SC. - "Processor" is a logical work processing unit. It is represented by a `struct cgpu_info` and one or more `struct thr_info`. Example: a single board within ButterFly Labs MiniRig SC. - "Thread" is a sequence of instructions and stack that manages hashing on one or more Processors within a single Device. It is represented by a `struct thr_info`. It should be noted that while every Processor has a `struct thr_info`, this may not represent the same Thread which is managing hashing on the Processor. Instead, this `struct thr_info` is only used to store status information needed for the Processor, and is maintained by the managing Thread in addition to its own `struct thr_info`. New drivers are encouraged to use an asynchronous model to manage as many Processors as possible within a single Thread. struct device_drv basics ------------------------ Every driver defines a `struct device_drv`. The `dname` field contains a short name of the driver. This should consist only of lowercase alphabetic characters, and be the same name used in the source file: driver-foobar.c defines `dname` "foobar". The `name` field contains a three-letter abbreviation for the device, used in the representation of devices. For example, `name` "FOO" would result in devices represented as "FOO 0", "FOO 1", etc and processors represented as "FOO 0a", "FOO 0b", etc. Drivers must define a function `drv_detect`, which is run at startup to detect devices. For each device (note: NOT each processor), it should allocate a `struct cgpu_info`, set some basic parameters on it, and call the `add_cgpu` function with it as an argument. Various values you can initialize are: .drv This MUST be set to your driver's `struct device_drv`! .deven Should be set to DEV_ENABLED .procs Number of Processors for this device .threads Number of threads your device needs - should be either a multiple of .procs (threads will be allocated to each Processor), or one (a single thread will be allocated only to the Device, to manage all Processors) .name Null-terminated name of the device itself `drv_detect` should return the total number of devices created. It should leave the device in an unused state, as the user may opt to delete it outright. Threads ------- The first interaction BFGMiner will have with a device is by calling the driver's `thread_prepare` function for each Thread. This occurs while BFGMiner is still in a single-threaded state, before any Threads have actually started running independently. It should do only the minimal initialization necessary to proceed, and return true iff successful. Once all the Threads are set up, BFGMiner starts them off by calling the `thread_init` function. This should do all initialization that can occur in parallel with other Threads. The driver should specify a `minerloop` to use. For the purposes of this document, it is assumed you will be using `minerloop_async`. 
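As a rough illustration of the above, a minimal skeleton for a hypothetical "foobar" driver might look something like the sketch below. This is only a sketch, not a verbatim copy of any real driver: the header names, the struct field used to select the `minerloop`, and the exact hook signatures are assumptions based on the descriptions in this document.

	#include <stdbool.h>
	#include <stdlib.h>

	#include "deviceapi.h"   /* assumed to declare the minerloops */
	#include "miner.h"       /* assumed to declare add_cgpu and the structs */

	struct device_drv foobar_drv;

	/* Runs once at startup; a real driver would enumerate hardware here.
	 * This sketch pretends exactly one Device with two Processors exists. */
	static int foobar_detect(void)
	{
		struct cgpu_info *cgpu = calloc(1, sizeof(*cgpu));
		if (!cgpu)
			return 0;
		cgpu->drv = &foobar_drv;   /* MUST point to our struct device_drv */
		cgpu->deven = DEV_ENABLED;
		cgpu->procs = 2;           /* two Processors on this Device */
		cgpu->threads = 1;         /* one Thread manages all Processors */
		cgpu->name = "Foobar Miner";
		add_cgpu(cgpu);
		return 1;                  /* total number of Devices created */
	}

	struct device_drv foobar_drv = {
		.dname = "foobar",             /* lowercase; matches driver-foobar.c */
		.name = "FOO",                 /* three-letter abbreviation */
		.drv_detect = foobar_detect,
		.minerloop = minerloop_async,  /* assumed field name for the minerloop hook */
	};

A real driver would, of course, only create a `struct cgpu_info` for each piece of hardware it actually finds, and would leave any further setup to `thread_prepare` and `thread_init`.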
Please note that the default is currently `minerloop_scanhash`, and much of the documentation here will NOT work with this `minerloop`. Processors ---------- Processors work with `struct work` objects, which each represent a block header to find a solution for. Before your driver sees a `struct work`, it will be passed to the function `prepare_work` with pointers to the Processor `struct thr_info` and the `struct work` as arguments. Most drivers do not need to do anything at this stage, so feel free to omit the `prepare_work` function. For each job, the `job_prepare` function is called in advance, with three arguments: Processor `struct thr_info *`, `struct work *`, and a `uint64_t` limiting how many nonces to check (starting from `work->blk.nonce`). Unless you implement a `can_limit_work` function, you will always receive a full nonce range from 0 to 0xffffffff. `job_prepare` increments `work->blk.nonce` to the last nonce the processor will be attempting and returns true when successful. Please note this will be called while the previous job is still executing. When it is time to actually start the new job, the `job_start` function will be called. This is given the Processor `struct thr_info *` as its only argument, and should start the job most recently prepared with `job_prepare`. Note that it is possible for `job_prepare` to be called for a job that never starts (another `job_prepare` may be executed to override the previous one instead). `job_start` must call `mt_job_transition` as soon as the actual switchover to the new job takes place, and must call `job_start_complete` when successful; in case of a failure, it should call `job_start_abort` instead. `job_start` must set `thr->tv_morework` to the time the device expects to need its next work item. It is generally advisable to set this a bit early to ensure any delays do not make it late. `job_start` is expected to always succeed and does not have a return value. Immediately before `job_start` is called to change from one job to the next, `job_get_results` will be called to fetch any volatile results from the previous job. It is provided the Processor's `struct thr_info *` and the currently executing job's `struct work *`. It should ONLY fetch the raw data for the results, and not spend any time processing or submitting it. If `job_get_results` is defined for your driver, it must (directly or indirectly) ensure `job_results_fetched` is called when complete (including the case of failure). After the new job has been started, your driver's `job_process_results` function will be called to complete the submission of these results with the same arguments, plus a bool to tell you whether the processor is being stopped. If it is, your driver must call `mt_disable_start` when it has successfully stopped hashing. Drivers may define a `poll` function. If this is defined, `thr->tv_poll` must always be set to a valid time to next execute it, for each Processor. Whenever a solution is found (at any point), the function `submit_nonce` should be called, passing the Processor `struct thr_info *`, `struct work *`, and nonce as arguments. If the solution is invalid (any of the final 32 bits of the hash are nonzero), it will be recorded as a hardware error and your driver's `hw_error` function (if one is defined) will be called. 
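Continuing the hypothetical "foobar" sketch above, the asynchronous job functions might fit together roughly as follows. The device I/O helpers (`foobar_build_cmd`, `foobar_send_job`, `foobar_read_nonce`), the per-processor state assumed to live in `thr->cgpu_data`, and the exact argument lists of `mt_job_transition` and `job_start_complete` are illustrative assumptions; for simplicity this sketch also reports results from `poll` rather than through the `job_get_results`/`job_process_results` pair described above.

	struct foobar_state {
		struct work *next_work, *current_work;
		uint8_t next_cmd[64];
	};

	/* Called ahead of time, possibly while the previous job is still running */
	static bool foobar_job_prepare(struct thr_info *thr, struct work *work, uint64_t max_nonce)
	{
		struct foobar_state * const state = thr->cgpu_data;  /* assumed private state */

		foobar_build_cmd(state->next_cmd, work);  /* assumed helper: header -> device command */
		state->next_work = work;
		work->blk.nonce = 0xffffffff;  /* full nonce range (no can_limit_work) */
		return true;
	}

	static void foobar_job_start(struct thr_info *thr)
	{
		struct foobar_state * const state = thr->cgpu_data;

		foobar_send_job(thr->cgpu, state->next_cmd);  /* assumed device write */

		/* The device is now hashing the new job */
		mt_job_transition(thr);
		state->current_work = state->next_work;

		/* Expect to need the next work item in roughly ten seconds; ask a bit early */
		gettimeofday(&thr->tv_morework, NULL);
		thr->tv_morework.tv_sec += 9;

		job_start_complete(thr);
	}

	static void foobar_poll(struct thr_info *thr)
	{
		struct foobar_state * const state = thr->cgpu_data;
		uint32_t nonce;

		while (foobar_read_nonce(thr->cgpu, &nonce))  /* assumed device read */
			submit_nonce(thr, state->current_work, nonce);

		/* tv_poll must always hold the time poll() should next run; here ~10ms away */
		gettimeofday(&thr->tv_poll, NULL);
		thr->tv_poll.tv_usec += 10000;
		if (thr->tv_poll.tv_usec >= 1000000) {
			thr->tv_poll.tv_usec -= 1000000;
			++thr->tv_poll.tv_sec;
		}
	}

Keeping the prepared command in driver state, rather than sending it to the hardware immediately, is what allows the next job to be staged while the previous one is still hashing.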
As often as results are processed, your driver should call the `hashes_done` function with a number of arguments: Processor `struct thr_info *`, count of hashes completed (including calls to `submit_nonce`), a `struct timeval *` that tells how long it took to find these hashes (usually time since the last call to `hashes_done`, and a `uint32_t *` which should usually be NULL. bfgminer-bfgminer-3.10.0/LICENSE000066400000000000000000000001451226556647300162000ustar00rootroot00000000000000BFGMiner is available under the terms of the GNU Public License version 3. See COPYING for details. bfgminer-bfgminer-3.10.0/Makefile.am000066400000000000000000000147571226556647300172450ustar00rootroot00000000000000# Copyright 2012-2013 Luke Dashjr # Copyright 2012 zefir # Copyright 2011-2013 Con Kolivas # Copyright 2013 James Z.M. Gao # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. See COPYING for more details. ACLOCAL_AMFLAGS = -I m4 EXTRA_DIST = \ m4/gnulib-cache.m4 \ linux-usb-bfgminer \ windows-build.txt dist_doc_DATA = \ AUTHORS COPYING HACKING NEWS README \ example.conf \ README.RPC rpcexamplesdir = $(docdir)/rpc-examples dist_rpcexamples_DATA = \ api-example.php miner.php \ api-example.c \ api-example.py SUBDIRS = lib ccan # Without a redirected rule, code depending on different lib/*.h files may try to build dependencies of that in parallel, which can fail lib/%: lib_directory @test -e $@ lib_directory: $(MAKE) -C lib ccan/libccan.a: $(MAKE) -C ccan $* INCLUDES = -fno-strict-aliasing bin_PROGRAMS = bfgminer bfgminer_LDFLAGS = $(PTHREAD_FLAGS) bfgminer_LDADD = $(DLOPEN_FLAGS) @LIBCURL_LIBS@ @JANSSON_LIBS@ @PTHREAD_LIBS@ \ @NCURSES_LIBS@ @PDCURSES_LIBS@ @WS2_LIBS@ \ @UDEV_LIBS@ @LIBUSB_LIBS@ @MM_LIBS@ @RT_LIBS@ \ @MATH_LIBS@ lib/libgnu.a ccan/libccan.a bfgminer_CPPFLAGS = -I$(top_builddir)/lib -I$(top_srcdir)/lib @LIBUSB_CFLAGS@ @LIBCURL_CFLAGS@ bfgminer_CPPFLAGS += -DCGMINER_PREFIX='"$(pkgdatadir)"' bfgminer_CPPFLAGS += @JANSSON_CFLAGS@ bfgminer_CPPFLAGS += $(PTHREAD_FLAGS) bfgminer_CPPFLAGS += $(NCURSES_CPPFLAGS) bfgminer_CPPFLAGS += $(AUTOSCAN_CPPFLAGS) bfgminer_LDADD += $(AUTOSCAN_LIBS) bfgminer_LDADD += $(libblkmaker_LIBS) bfgminer_LDFLAGS += $(libblkmaker_LDFLAGS) bfgminer_CPPFLAGS += $(libblkmaker_CFLAGS) # common sources bfgminer_SOURCES := miner.c bfgminer_SOURCES += miner.h compat.h bench_block.h \ deviceapi.c deviceapi.h \ util.c util.h logging.h \ sha2.c sha2.h api.c EXTRA_bfgminer_DEPENDENCIES = if NEED_LIBBLKMAKER SUBDIRS += libblkmaker EXTRA_bfgminer_DEPENDENCIES += libblkmaker_directory libblkmaker_directory: cd libblkmaker && $(MAKE) if HAVE_CYGWIN EXTRA_bfgminer_DEPENDENCIES += cygblkmaker-0.1-0.dll cygblkmaker_jansson-0.1-0.dll cyg%.dll: libblkmaker/.libs/cyg%.dll cp -p $< $@ endif endif bfgminer_SOURCES += logging.c if USE_UDEVRULES dist_udevrules_DATA = 70-bfgminer.rules endif if NEED_BFG_DRIVER_PROXY bfgminer_SOURCES += driver-proxy.c driver-proxy.h endif if USE_LIBMICROHTTPD bfgminer_SOURCES += httpsrv.c httpsrv.h driver-getwork.c bfgminer_LDADD += $(libmicrohttpd_LIBS) bfgminer_LDFLAGS += $(libmicrohttpd_LDFLAGS) bfgminer_CPPFLAGS += $(libmicrohttpd_CFLAGS) endif if USE_LIBEVENT bfgminer_SOURCES += driver-stratum.c bfgminer_LDADD += $(libevent_LIBS) bfgminer_LDFLAGS += $(libevent_LDFLAGS) bfgminer_CPPFLAGS += $(libevent_CFLAGS) endif if HAVE_OPENCL bfgminer_SOURCES += 
driver-opencl.h driver-opencl.c bfgminer_SOURCES += ocl.c ocl.h findnonce.c findnonce.h bfgminer_SOURCES += adl.c adl.h adl_functions.h bfgminer_SOURCES += *.cl kernelsdir = $(pkgdatadir)/opencl dist_kernels_DATA = $(top_srcdir)/*.cl dist_doc_DATA += README.GPU if HAVE_SENSORS bfgminer_LDADD += $(sensors_LIBS) endif endif if HAS_SCRYPT bfgminer_SOURCES += scrypt.c scrypt.h dist_doc_DATA += README.scrypt endif if HAS_CPUMINE dist_doc_DATA += README.CPU bfgminer_SOURCES += \ sha256_generic.c sha256_via.c \ sha256_cryptopp.c sha256_sse2_amd64.c \ sha256_sse4_amd64.c \ sha256_altivec_4way.c # the CPU portion extracted from original main.c bfgminer_SOURCES += driver-cpu.h driver-cpu.c if HAVE_SSE2 bfgminer_LDADD += libsse2cpuminer.a noinst_LIBRARIES = libsse2cpuminer.a libsse2cpuminer_a_SOURCES = sha256_4way.c libsse2cpuminer_a_CFLAGS = $(bfgminer_CPPFLAGS) $(SSE2_CFLAGS) endif if HAS_YASM AM_CFLAGS = -DHAS_YASM if HAVE_x86_64 SUBDIRS += x86_64 x86_64/libx8664.a: $(MAKE) -C x86_64 $* bfgminer_LDADD += x86_64/libx8664.a else # HAVE_x86_64 SUBDIRS += x86_32 x86_32/libx8632.a: $(MAKE) -C x86_32 $* bfgminer_LDADD += x86_32/libx8632.a if HAVE_SSE2 libsse2cpuminer_a_SOURCES += sha256_sse2_i386.c endif endif # HAVE_x86_64 endif # HAS_YASM endif # HAS_CPUMINE if NEED_BFG_LOWL_VCOM bfgminer_SOURCES += lowl-vcom.c lowl-vcom.h if HAVE_WINDOWS else bfgminer_SOURCES += iospeeds.h iospeeds_posix.h endif endif if HAVE_LIBUSB bfgminer_SOURCES += lowl-usb.c lowl-usb.h endif if NEED_BFG_BINLOADER bfgminer_SOURCES += binloader.c binloader.h endif if NEED_BFG_LOWLEVEL bfgminer_SOURCES += lowlevel.c lowlevel.h endif if NEED_DYNCLOCK bfgminer_SOURCES += dynclock.c dynclock.h endif if HAS_FPGA dist_doc_DATA += README.FPGA endif if HAS_ASIC dist_doc_DATA += README.ASIC endif if HAS_BITFORCE bfgminer_SOURCES += driver-bitforce.c if HAVE_WINDOWS else bin_PROGRAMS += bitforce-firmware-flash bitforce_firmware_flash_SOURCES = bitforce-firmware-flash.c endif endif if HAS_BIGPIC bfgminer_SOURCES += driver-bigpic.c driver-bigpic.h endif if USE_DRILLBIT bfgminer_SOURCES += driver-drillbit.c endif if HAS_TWINFURY bfgminer_SOURCES += driver-twinfury.c driver-twinfury.h endif if HAS_ICARUS bfgminer_SOURCES += driver-icarus.c icarus-common.h bfgminer_SOURCES += driver-cairnsmore.c bfgminer_SOURCES += driver-erupter.c bfgminer_SOURCES += driver-antminer.c endif if HAS_AVALON bfgminer_SOURCES += driver-avalon.c driver-avalon.h hexdump.c endif if USE_KNC bfgminer_SOURCES += driver-knc.c endif if HAS_KLONDIKE bfgminer_SOURCES += driver-klondike.c endif if HAS_MODMINER bfgminer_SOURCES += driver-modminer.c endif if HAS_X6500 bfgminer_SOURCES += driver-x6500.c ft232r.c ft232r.h jtag.c jtag.h endif if HAS_ZTEX bfgminer_SOURCES += driver-ztex.c libztex.c libztex.h endif if USE_BIFURY bfgminer_SOURCES += driver-bifury.c endif if HAS_BITFURY bfgminer_SOURCES += driver-bitfury.c driver-bitfury.h libbitfury.c libbitfury.h spidevc.h spidevc.c if HAS_BFSB bfgminer_SOURCES += driver-bfsb.c endif if HAS_METABANK bfgminer_SOURCES += driver-metabank.c tm_i2c.h tm_i2c.c endif if HAS_LITTLEFURY bfgminer_SOURCES += driver-littlefury.c endif if HAS_NANOFURY bfgminer_SOURCES += driver-nanofury.c mcp2210.c mcp2210.h endif if USE_HASHBUSTER bfgminer_SOURCES += driver-hashbuster.c endif if USE_HASHBUSTERUSB bfgminer_SOURCES += driver-hashbusterusb.c endif endif if USE_HASHFAST bfgminer_SOURCES += driver-hashfast.c endif if NEED_BFG_LOWL_HID bfgminer_SOURCES += lowl-hid.c lowl-hid.h bfgminer_CPPFLAGS += $(hidapi_CFLAGS) endif bin_PROGRAMS += bfgminer-rpc 
bfgminer_rpc_SOURCES = api-example.c bfgminer_rpc_LDADD = @WS2_LIBS@ bfgminer-bfgminer-3.10.0/NEWS000066400000000000000000012120571226556647300157020ustar00rootroot00000000000000BFGMiner Version 3.10.0 - January 15, 2014 - Downgrade official Windows build compiler to GCC 4.7.3. - Bugfix: Stratum: Accept JSON Number type for port number - Bugfix: proxy: Set start timer when creating new virtual devices - antminer: Add support for the Identify function - flashes LED 10 times - drillbit: Expand allowed external clock range to 0-255 - drillbit: Forbid setting external clock usage if not supported by device - Check for DBC_TEMP capability before trying to read temperature - Bugfix: drillbit: Reduce work message to correct size - README: Update documentation for new udev rules and "video" group - Bugfix: opencl/adl: Set iSpeedType for get-fanspeed requests, and ensure we don't change do something weird with the fan when initially setting user-defined speed flag. - Bugfix: drillbit: Initialise rv variable - Bugfix: Simplify adding "http://" prefix to avoid strncat overflow (length excludes null byte) - hashfast: Debuglog work flushing - hashfast: Implement OP_NONCE search flag - hashfast: Log seq numbers for nonces found - hashfast: Count hashes done by nonces found, rather than no-pending-work (which could be triggered by flushes) - hashfast: Just keep a queue of the 32 most recent work items per core - hashfast: Convert to minerloop_queue driver model - hashfast: Gracefully complain if we are given an unknown chip or core address - udev rule for hashfast devices - hashfast: New driver using UMS protocol - CRC-8-CCITT implementation - AUTHORS: Add Lingchao Xu and move nwoolls up to antminer driver (and mention TwinFury driver for Andreas) - knc: Workaround false compiler warning about "uninitialised" vars - Bugfix: drillbit: Access fd after potentially reopening - Remove Christmas colouring - drillbit: Add udev rule - drillbit: Correct configure logic to check for generic bitfury code (needed to decode nonces) - drillbit: Implement some basic problem recovery - drillbit: Support identify command - drillbit: Read/write access to clock and voltage configuration from RPC and ManageTUI - drillbit: Store board configuration - drillbit: Read temperature sensor - drillbit: Check nonces against prev work - drillbit: Implement mining - drillbit: Only detection code - antminer: Initial support for the Bitmain AntMiner U1 ASIC Includes support for identifying the U1 separately from Icarus and Block Erupter Also includes overclocking via --set-device antminer:clock=xHEX - Extend horizontal lines to full screen width - Log devid for USB string request failures - Bugfix: segmentation fault if the terminal window is too narrow for the Help and Quit items - Accept "address" spelled out in --coinbase-addr option - Bugfix: document the need to package zlib1.dll in the Windows build instructions - Bugfix: Stratum: Re-read pool sock var after suspend+restart - Silence false uninitialised var use warning and calculate dev_runtime only once - Bugfix: HID API not properly detected on Mac OS X - Adjust device list size as necessary when accessing options - Avoid erasing the screen when statusy is not changing - Abstract common set_statusy code out of change_logwinsize and check_winsizes - TUI: Support pgup/pgdown for scrolling device list by page - Bugfix: icarus: quirk_reopen is an int - Bugfix: Do not allocate spi_port on the Stack, even to initialize - EXC_BAD_ACCESS on OS X - get_statline3: Simplify statistics 
gathering - Bugfix: twinfury: Use serial number formatted over USB, so it works with --scan - twinfury: Only debuglog temperature debugging data when --device-protocol-dump is enabled - Bugfix: twinfury: Populate temperature info on both processors - Option --weighed-stats to display A and R values weighed by difficulty - README.GPU: Document always-disabled-by-default for OpenCL driver - AUTHORS: Add Nate Woolls - Extend menu to full width of window - Abstract out spaces-to-eol to bfg_wspctoeol function - Elaborate on spi_port+stack problem in comments - Bugfix: Do not allocate spi_port on the Stack - EXC_BAD_ACCESS on OS X - Bugfix: don't attempt to probe Bluetooth devices when scanning hardware - x6500: Allow overriding the maximum frequency used by the dynclock logic Can now use e.g. --set-device x6500:maxclock=210 Prevents spending time on frequencies that only produce HW errors - HACKING: Clearly document that dname must be lowercase and alphabetic - bifury: Tolerate corruption in submit message, remapping shares to the first processor if chip id is unrecognised - bifury: Tolerate corruption in hwerror message - bifury: Tolerate corruption in job message, and only count hashes done when completing a known job - Use a lowercase driver name to fix --scan pattern matching Otherwise the following doesn't work: -S noauto -S twinfury:auto BFGMiner Version 3.9.0 - December 25, 2013 - Update official Win32 build compiler and library: - - Upgrade GCC from 4.8.1 to 4.8.2 - - Upgrade libcurl from 7.28.1 to 7.34.0 - Update official Win64 build compiler and library: - - Upgrade GCC from 4.7.3 to 4.8.2 - - Upgrade mingw64-runtime from 2.0.8 to 3.0.0 - Green-on-red title colours for Christmas release - write_config: Include http-port and stratum-port options - Interpret F1 as a request for Help - Bugfix: SSM: Free old _ssm_notify before replacing it - Bugfix: SSM: Clean _ssm_cur_job_work as needed to avoid memory leaks - Support matching --scan with lowlevel devid - cgpu_match: Unit test for USB device path matching - Bugfix: cgpu_match: Handle digits in dname (x6500) - cgpu_match: More unit tests (dname with digit) - cgpu_match: More unit tests (dname and case insensitivity) - Display "NO DEVICES FOUND" line in place of device list, when there are none - bitfury: Use drv_set_defaults to enable setting baud before probe - bitfury: Split out SPI port configuration option ("baud") to its own function - drv_set_defaults wrapper function around cgpu_set_defaults for use with options that may need to be set during probe - bitfury: Set poll interval to start iteration before responses are processed - modminer: Check identification begins with "ModMiner" to avoid false detection - Bugfix: hashbusterusb: Correct return value of hashbusterusb_vrm_unlock - Support for installing a udev rules file for Linux - twinfury: Remove unused variable to silence warning - cgpu_request_control should be a noop when called from the main thread - Bugfix: Handle errors creating a vcom devid more gracefully - Bugfix: _wlog: Allocate enough space for complete copy of log line - bfsb: Remove unused clock_gettime - Bugfix: bfsb: Remove useless slot_on which was never properly initialised - Bugfix: When QueryDosDevice fails, skip trying to parse its (undefined) results - hashbusterusb: Voltage should be in volts (not millivolts) for RPC - hashbusterusb: Provide access to VRM stuff from RPC - hashbusterusb: Use cgpu_request_control interface to safely access device from outside main thread - hashbusterusb: Include Voltage in RPC 
stats - Bugfix: hashbusterusb: Ensure unlock code is always allocated, even if null - hashbusterusb: Abstract code into hashbusterusb_vrm_lock - hashbusterusb: Abstract code into hashbusterusb_vrm_unlock - hashbusterusb: Abstract code into hashbusterusb_set_voltage - Bugfix: hashbusterusb: Check for voltage change error correctly - Abstract mutex_request code from X6500 driver into generic device API interface - hashbusterusb: Use standard identification behaviour - hashbusterusb: Abstract hashbusterusb_set_colour function - hashbusterusb: Get voltage with temperature - hashbusterusb: Clean up unused variable warnings - hashbusterusb: Use bitfury_wlogprint_status for osc6_bits displaying in Manage TUI - Bugfix: hashbusterusb: Remove ignored prompt for VRM lock - hashbusterusb: Use Manage/osc6_bits code from main bitfury driver - hashbusterusb: Provide access to VRM and identification in Manage TUI - hashbusterusb: Shutdown PSU - nanofury: Support identify function by turning off LED for 5 seconds - nanofury: nanofury_state structure - bitfury: Set poll interval to start iteration before responses are processed - Twinfury: moved voltage reading to the thread init function - Twinfury supply voltage initial reading: error log improved - Twinfury: Reading supply voltage on startup - Voltage scaling for twinfury implemented BFGMiner Version 3.8.1 - December 9, 2013 - bfgminer-rpc: Catch error when server host fails to resolve to an IP - RPC: Remove unnecessary delay from RPC server startup - Call WSAStartup for our own needs independently of libcurl etc - hashbusterusb: Give more meaningful errors before serial number is known - hashbusterusb: Populate device_path with USB devid - Rename hashbuster2 to hashbusterusb (only a-z allowed in driver names) - Include libusb in options list, since it is no longer tied to specific drivers - Make hashbuster serial number output match formatting on physical board - Fix for hashbuster first init after power up - Workaround Microsoft's non-standard swprintf - vcom: Fabricate vcom devinfo for any existing paths specified to --scan, in case enumeration fails - Bugfix: hashbuster2: Check for errors setting up libusb handle - Bugfix: Draw statuswin in line order to ensure overflow is cutoff properly - Fixed one byte stack overflow in mcast recvfrom. 
- Bugfix: Let libc do any translation for %lc before adding wide characters to curses - Specifically handle mining.get_transactions failures so they get logged at the lower debug loglevel - Bugfix: lowlevel: Use LL_DELETE2 when cleaning up secondary list BFGMiner Version 3.8.0 - December 1, 2013 - Bugfix: lowl-usb.h: Add missing includes for stdbool/stdint - hashbuster2: Retry writing request if no response received in 100ms - lowl-usb: usb_ep_set_timeouts_ms function that behaves similar to termios VTIME (timeout before begin of read/write) - hashbuster2: Use new lowl_usb_endpoint for i/o - lowl-usb: Add tools for more reliable read/write using bulk transfers - Add bytes_extend_buf, bytes_preappend, bytes_postappend for reading direct to a bytes_t - New / updated instructions for building bfgminer on Windows - configure: Adjust header-path discovery to work sanely with spaces and backslashes in paths - Bugfix: configure: More fixing BSD sed syntax for curses header search - bitfury: Reinitialise chips if their frequency drops over 50% - bfg_strerror: Trim C whitespace off the right of FormatMessage output, since Windows can add \r\n - Demote commonish errors to debug loglevel - klondike: Add support for Avalon2-based designs - klondike: Remove unnecessary limit of 999 on clock speed displayed in Manage Device - Bugfix: hashbuster2: Claim USB device internally - Bugfix: hashbuster2: Do not try to use lowl-hid.h - Bugfix: hashbuster2: Close device and free memory allocated, when no chips are found - Bugfix: hashbuster2: Remove libusb init and debug level setting - Bugfix: hashbuster2: Avoid add_cgpu when no chips found - Bugfix: klondike: Claim USB device internally - Bugfix: devpath_to_devid: Check stat for error before allocating memory - Bugfix: lowlevel: Free hash table data when done with it - Bugfix: bifury: Free memory used by version reply - bfgtls: Implement destructor to free memory when threads exit - hashbuster2: Remove unnecessary libusb_detach_kernel_driver - hashbuster2: Read unique serial number from device - hashbuster2: Use hashbuster2_io for probe - hashbuster2: Clean up warnings about unused variables - README: Document --enable-opencl configure option - README: Update for HashBuster Micro - hashbuster2: Integrate into BFGMiner as a separate driver - hashbuster2: Import driver for new protocol - Bugfix: nanofury: Check if mcp2210_device exists before trying to power it off - Bugfix: lowl-vcom: Check for error getting root hub path - openwrt: Update package description - configure: Add --without-libusb option to explicitly disable using it - Bugfix: Pass newlines as ASCII (fixes lack of them in non-unicode builds) BFGMiner Version 3.7.0 - November 27, 2013 - FTDI scan: Use intptr_t to avoid warning about different size type cast - Bugfix: proxy: Add missing unistd.h include - Bugfix: bifury: Precision specifications should be int, not size_t - Bugfix: klondike: Include config.h first - Bugfix: binloader: Include config.h first - README.ASIC: Include support URI for bi*fury driver and firmware - Added MultiMiner as an example of a GUI application that wraps bfgminer using the RPC interface - Silence non-issue warnings - bifury: Add support for hwerror message - inc_hw_errors2: Support reporting hw errors without a known nonce - bifury: Initialise cutofftemp to 75 C - bifury: Turn chips off and on when disabled and enabled - bifury: Split out bifury_send_clock - Bugfix: bifury: Avoid double-free and double-close on version parse errors during probe - bifury: Add support for 
clock command - bifury: Update for "job" protocol change - bifury: Update to current git - bifury: Workaround bugs in older firmware by flooding device with work until it proves it has needwork - bifury: Attempt to submit shares even if maxroll limit is ignored - Clone cgminer's submit_noffset_nonce interface - Bugfix: bifury: Check that there is a line before comparing it to "version " - bifury: Prune jobs older than 16 queued, to eventually clean up discarded work (from flushes) - Replace ucs2tochar* with ucs2_to_utf8* to handle Unicode - README.ASIC: Document Bi*Fury firmware upgrade - bifury: Free work when we know it's complete - bifury: Implement hashrate reporting using "job" messages - bifury: Tolerate other data before version reply - bifury: Abstract line parsing into bifury_readln function - Bugfix: bifury: Use a char array, rather than pointer to char array, for bifury_init_cmds - bifury: Unify SEND devprotodump logging code - fpgautils: Skip SetCommConfig on Windows if baud is not to be set - Check for U+2022 and replace it with "*" is unprintable - Use replacement character for non-printable Unicode - Use UTF-8 for Unicode - Implement a utf8_decode function to produce wchar_t needed by curses - bifury: New driver - Bugfix: twinfury: Handle flash request for both chips, and avoid using proc uninitialised - Bugfix: twinfury: Fix various technicalities - Bugfix: twinfury: Remove incorrect tcflush - Twinfury communication protocol changed to be more robust. A preamble is sent before the actual command. - LED flashing changed: - setting a flag and flash the LED during the poll cycle - mutex removed - Renamed bf2 driver -> twinfury driver bin2hex utility used to convert the serial number - BF2 driver comment edited - BF2 - Twin Bitfury USB miner driver added - Deprecate --ndevs|-n (redundant with -d? 
and GPU-only) - Never abort startup just because we have no devices (there may be cases this is pointless, but at least it's consistent) - cgpu_match: Accept ranges for letter processor specification - cgpu_match: Accept ranges for numeric elements - test_cgpu_match: Test with more than one device/processor - configure: Condense configuration options summary a bit - configure: Put configuration options summary back after everything completes - Workaround bug in MinGW-w64: Check for strtok_r to be defined at compile-time, and don't override it unless we really want to override - Bugfix: configure: Use new need_lowl_vcom instead of need_fpgautils - configure: Explicitly check for sleep - Bugfix: Set LIBUSB_LIBS for non-pkgconf non-header-subdir libusb installations - Move unused winsock2.h includes to the end of config.h - Use lround instead of llround, since it has better compatibility with older MingW and Cygwin versions - lowl-hid: Save and use hid_enumerate results from library test - Silence Windows build warnings - mcp2210: Remove cruft leftover from HID code (moved out) - lowl-vcom: sysfs scan: Simplify maybe_strdup into _sysfs_do_read - lowl-vcom: sysfs scan: Don't bother getting product string until we confirm it is in fact a tty device - input_pool: Accept a null password - Bugfix: Move const data into the scope it needs to be in - Bugfix: Restore case insensitivity to --scan driver names - Accept "@" separator for serial/path in --scan to match --device and --set-device better - README: Uppercase the sequence letter when talking about multiple pools - README: Mention "make install" and clarify saving settings - Bugfix: avalon: Don't "probe" with -S - bitforce: Add lowl_* to bitforce_queue so it can be specified by name as a driver - Bugfix: devpath_to_devid: Be strict about COM port specifications on Windows - Bugfix: avalon: Don't "probe" with -S all - Bugfix: Only count devices in -d? - RPC: {dev,proc}details: Add 'Processors', 'Manufacturer', 'Product', 'Serial', 'Target Temperature', 'Cutoff Temperature' - debian: Need to explicitly add opencl support to build. - Correctly document klondike as being enabled by default - Simplify logic for README.ASIC and README.FPGA install - Split fpgautils into lowl-vcom and binloader, and fix internal build dependencies - Add README.CPU to "make install" distribution when built with CPU mining support - line 2913 added urlencode - Bugfix: RPC: Release apisock on error in tidyup - klondike - dont try to flush if not initialised - Rename --scan-serial to --scan - Bugfix: Handle invalid driver names cleanly - Workaround libcurl bug to properly support HTTP proxies for stratum - Bugfix: Write klondike options for klondike-options (not icarus options) - README.OpenWrt: Change example repository to "latest", and document alternatives - mcp2210: Add support for --device-protocol-dump - Bugfix: Initialise dynamic osc6_bits variables - README: Fixed a few typos here and there and improved wording (Thanks miningpenguin!) 
- README: Moved CPU sections to README.CPU and elaborated more on it - README*: Grammatical cleanups - README.ASIC: More KnCMiner - README.ASIC: Add section about KnCMiner - Bugfix: opencl: Dynamic mode should disable threads for this device, not just N after the first - Bugfix: Make curses_input return NULL on a blank line, as most callers expect - Duplicate (hidden) --userpass option before --pool-priority so it can be used in config files - Only probe VCOM devices on bare "all" scan-serial request - Provide backports of uthash LL_{PREPEND,FOREACH{,_SAFE}}2 (only added in 1.9.7, not yet in major distros) - Upgrade required uthash version to 1.9.4 for LL_CONCAT - README: Update documentation for scan-serial - README: Bitstreams no longer included with BFGMiner source - README: Remove incomplete and unnecessary list of ASIC/FPGA devices - write_config: Add scan-serial and set-device - write_config: Update to make JSON Array of named parameters - Remove support for comma-separated values in --device, and update documentation - lowlevel: Collapse multiple lowlevel drivers on the same devid to a secondary linked list, and only spawn one thread to probe all lowlevels on a given devid - Bugfix: vcom_devinfo_scan: Clean up hashtable when done - README: Update cgminer driver FAQ with Windows-specific answer, now that we have a workaround for *nix - cpu/opencl: Always disable by default; removes deprecated -G option entirely - vcom_lowl_probe_wrapper: Attempt to reattach detached USB devices - Bugfix: erupter: Prioritise Emerald after Sapphire - Bugfix: Allow "drv:auto" to override general "noauto" - Bugfix: _probe_device_match: Fix inverted logic - Bugfix: Don't try to call lowl_probe for drivers that don't support that interface - lowl-hid: Use usb devids for libusb-based hidapi devices - Replace bfg_claim_hid with lowlevel_claim - lowlevel: Generic claim interface for lowlevel info - cgpu_match: Compare dev_t under paths - Rework -d? 
output to display more useful information now that device ordering is non-deterministic - cgpu_match: Add support for matching by serial number or path - Unit tests for cgpu_match - Rework --device option to accept a pattern and JSON Array in config; removes support for processor ranges - Remove --remove-disabled option, as it conflicts with hotplug, per-processor stuff, etc already - Bugfix: Reimplement "noauto" flag - Bugfix: Restore "-S foo" without driver name functionality - Bugfix: scan-serial: Compare dev_t as well - littlefury: Convert to lowl_probe - hashbuster: Convert to lowl_probe - avalon: Convert to lowl_probe - cairnsmore/erupter/icarus: Convert to lowl_probe - bigpic: Convert to lowl_probe - ztex: Convert to lowl_probe - x6500: Convert to lowl_probe - nanofury: Convert to lowl_probe - modminer: Convert to lowl_probe - klondike: Convert to lowl_probe - bitforce: Convert to lowl_probe - fpgautils: Wrapper to easily adapt an old detectone to new lowl_probe - lowlevel: Add match variants of detect functions - DevAPI: add_cgpu: Use mutex to ensure thread safety - Make detectone_meta_info thread-local - New threaded device probe framework BFGMiner Version 3.6.0 - November 12, 2013 - RPC: Bump to 2.2 for Works in POOLS - Bugfix: klondike: Don't try to free off the stack - configure: Update klondike checks for libusb - klondike: Autodetect by VID/PID/Manufacturer, rather than too-short "K16" Product search - Remove accidentally added ASIC-README - klondike: Remove noop identify function - klondike: Replace deprecated statline with temperature and ManageTUI stuff - --shares should be scaled to diff1 not absolute number of shares - More README updates. - Minor README updates. - sha2 allow external access to some macros and the K array - klondike: Fixed a math issue when reporting fan speed on the status line. - Add a get and queue helper work function. - Reset the work_restart bool after the scanwork loop in case the driver flushes work synchronously. - Get rid of the stage thread since all work can be asynchronously added now via hash_push anyway. - Fix for opt_worktime on big endian machines. - Do get_work in fill_queue without holding other locks. - Make hash_pop signal the work scheduler each time it waits on the conditional that it should look for more work. - Remove discarded work from quota used. - Display works completed in summary and API data. - Store how many work items are worked on per pool. - Add the ability to add uint8 and uint16 entities to api data. - klondike - initialise stat_lock - klondike - better to unlock locks than to lock them twice :) - Remove roundl check and define - 'llround' is more suitable here than 'roundl' - klondike - change options to clock and temptarget only - klondike - fix another uninit dev warning - klondike - downgrade 'late update' but add an idle detect - and correct error levels - klondike - fix isc uninit warning - klondike - drop the device for hotplug if it's unresponsive - klondike - single 'shutdown' and ensure it happens - klondike remove SCNu8 - unsupported on windows - klondike - fix uninitialised dev bug - Don't attempt to disable curses or print a summary during an app restart to prevent deadlocks. - klondike - error condition handling - Modify Makefile to only include opencl related code when configured in. - Convert opencl to need to be explicitly enabled during build with --enable-opencl - Implement a cglock_destroy function. - Implement a rwlock_destroy function. - Implement a mutex_destroy function. 
- Simplify queued hashtable by storing unqueued work separately in a single pointer. - Add cgminer compatibility macro for ms_tdiff - klondike rewrite work control - allow __work_complete() access - miner.h allow devices to tv_stamp work - klondike - can only calculate the nonce difference on or after the 2nd nonce - klondike - correct/reverse min/max stats - klondike: Remove unnecessary devlock - klondike - use a link list queue rather than a circular buffer - and add timing stats - Klondike - increase circular read buffer size - Klondike - extra zero value and range checking in temp conversion - klondike - display MHz also - klondike correct cvtKlnToC() temperature calculation - klondike - correct 1st reply debug based on define - klondike - debug dump structured replies - klondike - avoid division by zero if maxcount is unexpectedly zero - klondike store and report errorcount and noise - klondike - fix chipstats api stats buffer overrun with 16 chips - klondike add new nonecount only once - klondike - report mh/s based on nonces found + put old estimate into API stats - klondike use a memcpy - klondike fix bracket tabs indenting - klondike: Update code to current git - Klondike update code to current git - Add Klondike to README - Add Klondike to README.ASIC - Klondike to main directory - Klondike consistent code spacing - Klondike update driver code to current git - klondike: update firmware for 16 chips, add dist files - klondike: beta final 0.3.0 release - klondike: updated firmware, IOC method - klondike: prevent nonces when not state W - klondike: added driver config option support - klondike: fixes for 300 MHz, fix K1 parts list - klondike: update driver, docs - klondike: update firmware & utils - klondike: updated cgminer driver for 3.3.1 - klondike: update firmware and driver, create new cgminer fork - update klondike driver - klondike: add cgminer driver file as-is BFGMiner Version 3.5.2 - November 12, 2013 - README.scrypt: Update to reflect current status of code (unmaintained); remove Con's litecoin donation address (leaving his bitcoin one) since it is unknown if he still accepts donations with litecoin - Bugfix: minerloop_async: Check the correct _mt_disable_called flag - bitforce: Allow ZCX response to override Manufacturer string - Bugfix: RPC: Restore null termination on responses - Bugfix: configure: We need DLOPEN_FLAGS for lowlevel hid too - Add additional debug information to help track work through BFGMiner - README: Update hidapi dependency for HashBuster - Bugfix: bigpic: Convert device serial and nonces to host endian - Bugfix: modminer: Ensure devices that fail probe are closed properly - Bugfix: bitforce: Ensure devices that fail probe are closed properly - Bugfix: littlefury: Ensure devices that fail probe are closed properly - Bugfix: bigpic: Ensure devices that fail probe are closed properly - nanofury: Attempt to be more resilient to problems BFGMiner Version 3.5.1 - November 7, 2013 - Bugfix: make-release: Add README.GPU document - Demote USB string fetch failure to LOG_DEBUG since it is rather common - Bugfix: RPC: devscan shouldn't be available to read-only access - Bugfix: bigpic: Ignore the bitfury chip's counter as best we can - Bugfix: bigpic: Accept other delimiters in Bitfury BF1 product string in autodetection - Bugfix: Fix VCOM/fpgautils build check - Bugfix: Only include VCOM code when VCOM/fpgautils support is being built - Bugfix: Reimplement scan-serial "all" keyword - Bugfix: bitforce detect: Close device after ZGX failure - Bugfix: Define 
lowl_usb even if libusb support is omitted, since VCOM and HID drivers check against it for warnings BFGMiner Version 3.5.0 - November 6, 2013 - bigpic: Probe sooner than most drivers, but still after icarus/erupter - cpu/opencl: Fix probe priorities to be last - Bugfix: RPC: Correct per-device Work Utility to be per-minute instead of per-second - Bugfix: Fix Windows VCOM enumeration to work with new lowlevel code - Bugfix: nanofury: Use maybe_strdup in case strings may be missing - Bugfix: bitfury: Fix processor disable/enable for all bitfury-based devices - hashbuster: Get temperature when available - Bugfix: hashbuster: Use maybe_strdup where there might not be a string - hid: Treat null-length strings as missing entirely - hashbuster: Enable dynamic osc6_bits - hashbuster: New driver - Split a hid lowlevel out of mcp2210 - ztex: Convert to use lowlevel usb enumerator - A quick flag to allow drivers to convey the desire to rescan devices (from lowlevel up) - Lowlevel USB enumerator for libusb - dname for lowlevel drivers - Bugfix: lowlevel: Avoid null pointer dereference on devices that are missing USB strings - lowlevel: Add a userpointer to lowl_found_devinfo_func_t and use it for serial_autodetect - Rework VCOM autodetection to go through lowlevel interface, avoiding repeat USB string fetching - lowlevel: Keep track of the claimed devid and debuglog every found device - ft232r: devinfo_scan: Don't skip claimed USB devices - ft232r: Be more tolerant of USB open failure or missing strings - RPC: Build most of CONFIG "Device Code" by iterating over registered driver list - drv_detect_all: Use priority-sorted list of registered drivers - Build hash tables to find drivers by dname or name - Build a linked list of registered drivers - BFG_REGISTER_DRIVER macro to commonly pre-define device_drv structures - Move have_libusb checks out of drv_detect_all - Replace opt_nogpu with simple function that adds opencl:noauto to scan-serial list - opencl: Remove ancient gpu_threads variable, not really needed anymore - Bugfix: Stop waiting for pool testing as soon as one is active - bitfury: Clean up by making private functions static, and naming public ones with [lib]bitfury_ prefix - bitfury: Remove old (unused) driver code - bitfury_gpio: Convert to using current bitfury driver code - Bugfix: littlefury: Upgrade to use current bitfury driver (old one didn't work with littlefury anymore anyway) - Bugfix: test_work_current: Make hexstr buffer large enough for blkhashstr later on (issuing old work msg) - README: Document --enable-knc configure option - README: Document --disable-nanofury configure option - Windows autodetect: clear detectone meta info - Check for Windows DDK headers, and where they are (and build without them if unavailable) - Windows SetupAPI-based detection for USB VCOM devices (tested with CP210x and CDC) - ucs2tochar{,_dup} functions to convert USB string descriptors to char* format - bfg_strerror: Support for Windows system errors - Introduce applogfail* macros - Bugfix: scrypt: Add missing include stdbool.h - Bugfix: Explicitly cast uses of be32toh to uint32_t - Bugfix: Add missing includes of config.h, stdbool.h, and stdint.h - lowlevel: Convey device manufacturer up to nanofury cgpus - nanofury: Store device HID path for RPC - lowlevel: Promote path to a normal element for devices to simplify things a bit - nanofury: Claim mcp2210/hid devices - mcp2210: Implement claim on HID paths - fpgautils: Use string identifiers for internal hardware claims - ztex: Keep interface claimed 
as a lock - Bugfix: mcp2210: Check for NULL hid device strings before trying to convert them to ASCII - FTDI autodetect: Enable populating USB product and serial number - Bugfix: FTDI autodetect: Add missing parameter values and remove unused buffer - Bugfix: bigpic: Don't active poll event without a poll function BFGMiner Version 3.4.0 - October 26, 2013 - Bugfix: knc: Use separate DEV_RECOVER_DRV deven when doing automatic core disable/re-enable, so user-initiated disables are left alone - Document existing enum dev_enable usage - Bugfix: knc: Only try to re-enable cores after we've actually asserted them as disabled, otherwise we won't actually assert the enable either - knc: Automatically disable and re-enable problematic cores - AUTHORS: Add Dmitry Sorokin - bitfury: Disable dynamic osc6_bits when manually setting a specific value - bitfury_do_io: Make use of timer_elapsed - bitfury: Only use dynamic osc6_bits with boards that support it explicitly (BFSB and Metabank for now) - bitfury: More debugging info for dynamic osc6_bits - Bugfix: bitfury: Fix tv_stat to persist - bitfury: Remove useless debugging - bitfury: Simplify dynamic osc6_bits logic such that the range is defined in one place - bitfury: Remove unused skip_stat - Bugfix: bitfury: Stick to tracking best_done per-chip, and never call zero_stats (which is for users, not drivers) - Bugfix: bitfury: Use proc_repr rather than incorrectly calculated chip_id - Bugfix: bitfury: struct bitfury_device is already chip-specific - bitfury: dynamic osc_6 bits - bitfury: Improve frequency calculation - fixes occasional miscalculations as well as makes it safer with multiple devices - bitfury: Added frequency calculation - tm_i2c: Corrected copyright - Allow --set-device opt=val without a device specification to attempt setting it on every device - Bugfix: knc: Maintain queue_full for all cores, so they continue to queue work even if the first is disabled - bitfury: Allow setting osc6_bits up to 60 - Bugfix: Set status to LIFE_DEAD2 when killing threads at shut off, to avoid calling driver code - Skip "testing stability" message at startup - Try switching pools if for some reason we end up with only idle pools and have ended up current_pool set to an idle one. - Check a pool is stable for >5 mins before switching back to it. 
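
The last two entries above change pool fail-over behaviour: a pool that has gone idle is only switched back to after it has stayed alive for more than five minutes. A rough sketch of that check, with hypothetical field and constant names rather than the actual pool struct:

    #include <stdbool.h>
    #include <time.h>

    #define POOL_STABLE_SECS (5 * 60)  /* the ">5 mins" window from the entry above */

    struct example_pool {
        bool idle;            /* currently failing to provide work? */
        time_t recovered_at;  /* when the pool last came back to life */
    };

    /* Only consider switching back once the pool has been up for the full window */
    static bool example_pool_is_stable(const struct example_pool *pool)
    {
        if (pool->idle)
            return false;
        return (time(NULL) - pool->recovered_at) >= POOL_STABLE_SECS;
    }
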
- ManageTUI: Slash key performs processor selection by search - nanofury: Populate dev_product and dev_serial - mcp2210: Implement mcp2210_close - Bugfix: mcp2210: hid_write needs a report id, especially on Windows - mcp2210: Workaround some Windows snprintf not handling wide strings correctly by simply copying character-per-character (only works for ASCII data) - Bugfix: chroot: Check that chdir succeeds - AUTHORS: Add pontus - knc: Expose Voltage and DCDC Current to TUI Manage device - knc: Expose Voltage and DCDC Current to RPC status - knc: Store volt/current on knccore - knc: Read voltage/current from DCDCs - Bugfix: knc: Fix temperature readout of half-celsius - Bugfix: Only define have_libusb if building with libusb support - littlefury: Remove unused code - bitfury: RPC procset: Ignore extra data (like newline) after numbers - Add --set-device option to allow setting default procset commands - README: Document hidapi dependency for NanoFury sticks - AUTHORS: Include Vladimir Strinski for NanoFury code - mcp2210: Set GPIO output mode atomically with their output values - nanofury: Power off device when shutting down - make-release: Include libhidapi-0.dll - mcp2210: Port to Windows - configure: Find hidapi.h for mcp2210 using pkg-config - bitfury: Provide read/write access to osc6_bits from the Manage TUI - nanofury: Reduce oscillator bits to 50 to stay within USB specs - bitfury: If multiple integers differ after 4 tries, just go with the first so we can start mining - nanofury: Implement actual mining code - nanofury: Turn off device after checkport - nanofury: New driver; detect device - mcp2210: Implement protocol required for NanoFury USB sticks - mcp2210: Skeleton low-level driver for MCP2210 USB to SPI Master bridge - ft232r: Use lowlevel interface for ft232r_open - Abstract ft232r scan+probe interface into new generic lowlevel driver interface - configure: Error if knc driver is requested, but linux/i2c-dev.h is missing or not from i2c-tools - knc: Attempt to express core enable/disable to controller - knc: Put knc_device on every processor device_data - Refactor device disable/enable logic so that drv.thread_{disable,enable} actually get called - Bugfix: knc: Check that device actually has work queued, before trying to find the most recent one - knc: Read temperature sensors - DevAPI: minerloop_queue: Run watchdog in device thread - knc: Pass queue flush to device when its most recent job produces stale shares - knc: Issue flush command at init - Bugfix: knc: Perform le32toh on unknown-work nonces - knc: Use independent device_id for works to enforce 15-bit size - Bugfix: spi_emit_nop: Correct counter logic - knc: Implement mining - knc: Use FPGA i2c to identify present ASICs - knc: Basic detection of boards on expected i2c buses - DevAPI: generic_detect: Just use an enum for flags - Use list of drivers/algos/options generated by configure in --help info - miner.php correct sort gen field names largest to smallest - api ... the code related to device elapsed - api add device elapsed since hotplug devices Elapsed is less than cgminer Elapsed - RPC: Include more info in per-device/processor status - API add 'MHS %ds' to 'summary' - Icarus remove unneeded opt_debug tests due to applog being a macro - We should only yield once in cg_wunlock - Provide a function to downgrade a cglock from a write lock to an intermediate variant. - Reset quotas on load balance for all pools at the same time to avoid running out during selection and unintentionally dropping to fallback.
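
The hid_write fix above concerns a hidapi convention that is easy to miss: the first byte of the buffer passed to hid_write() is the report ID (0 for devices without numbered reports), and Windows enforces this strictly. A hedged sketch of prepending it (buffer size and header path are illustrative; this is not the actual mcp2210 code):

    #include <hidapi/hidapi.h>   /* sometimes installed as plain <hidapi.h> */
    #include <string.h>

    static int example_write_report(hid_device *dev, const unsigned char *data, size_t len)
    {
        unsigned char buf[65];   /* 1 report-ID byte + 64-byte report, illustrative size */

        if (len > sizeof(buf) - 1)
            return -1;
        buf[0] = 0;              /* report ID; required even when unused, notably on Windows */
        memcpy(&buf[1], data, len);
        return hid_write(dev, buf, len + 1);
    }
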
- Break out of select pool from a common point for appropriate debug messages and to avoid further tests. - Find the greatest common denominator in quotas and use the smallest number of consecutive work items per pool in quota load balance mode to smooth hashrate across pools with large quotas. Give excess quota to priority pool 0 instead of pool 0. - Add subdir-objects to automake options. - Use the --failover-only flag to have special meaning in combination with load-balance mode to distribute any unused quota back to pool 0 to maintain ratios amongst other pools. - Display quota and allow it to be modified via the pool menu. - More README about quotas. - Document quotas and new load-balance strategy. - Add API commands and modify output to support pool quota displaying and changing. - Change message in status when using a balanced pool strategy to notify if there's a stratum pool as well. - Add quota support to configuration files. - Rotate pools on all failures to set a pool in select_pool. - Use quotas for load-balance pool strategy. - Provide a mechanism for setting a pool quota to be used by load-balance. - Change --socks-proxy option to default to SOCKS5 - Cope with trailing slashes in stratum urls. - Make extract_sockaddr set variables passed to it rather than pool struct members. - miner.php sort the mcast rigs so they are always in the same relative order - miner.php allow sending the multicast message multiple times - miner.php mcast ignore duplicate replies - miner.php coding warning - miner.php disable 'gen' by default - miner.php allow formula generation of new fields - miner.php add doctype - miner.php remove incorrect echo - miner.php optional error if not enough mcast rigs are found - take_queued_work_bymidstate should use a write lock. - API mcast add a description option with miner.php - Skip dissecting opt->names in parse_config if it doesn't exist. - ICA optional limit timing with short=N or long=N - logging - applogsiz() for large messages - Provide a function that looks up queued work by midstate and then removes it from the device hash database.
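
The quota entries above describe the load-balance scheduling change: each pool's quota is divided by the greatest common divisor of all quotas, so work is handed out in the smallest possible runs of consecutive items per pool. A simplified sketch of that reduction step (names are illustrative, not the actual pool code):

    static int example_gcd(int a, int b)
    {
        while (b) {
            int t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    /* Reduce quotas by their GCD so e.g. 50/50 becomes 1/1 and work alternates
     * pool-by-pool instead of being handed out in long runs. */
    static void example_scale_quotas(int *quota, int npools)
    {
        int g = 0;
        int i;

        for (i = 0; i < npools; ++i)
            g = example_gcd(g, quota[i]);
        if (g < 1)
            return;
        for (i = 0; i < npools; ++i)
            quota[i] /= g;
    }
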
- Wait until all pools are done testing before giving up, regardless of how long they take to fail - AUTHORS: Add Ricardo Iván Vieitez Parra and Paul Wouters - Bugfix: Check that setgid succeeds - When using --chroot, chdir to the new root - Bugfix: Add missing drivers to --help list - Bugfix: Zero stats: cgpu->diff_stale - fpgautils: serial_close: Explicitly release advisory lock before closing, in case fd has been inherited by a process forked by system() - Use serial_close to pair with every serial_open - Workaround bug in Plibc by resetting locale to "C" at startup - Bugfix: bitfury: Check for necessity of linux/i2c.h so build succeeds with i2c-tools's userspace linux/i2c-dev.h - bitforce: Workaround bugs in current firmware for the Chili - Bugfix: DevAPI: Reduce race-collision delay to 1ms so thread_shutdown actually gets called before threads are killed forcefully - Bugfix: configure: Use AC_SYS_LARGEFILE to ensure nonce/share log files can safely grow bigger than 2 GB - Bugfix: Correct argument type for --submit-threads option - littlefury: Workaround Xcode bug initialising fields in anonymous unions - Bugfix: write_config: Make intensity list writing more readable, and avoid extraneous argument in dynamic intensity case - DevAPI: Trigger mt_disable_start after init, if a device is disabled before minerloop starts - icarus: Skip sending new work if entering DEV_RECOVER* modes - icarus: When disabling, close device fd and stop sending new work - RPC: Use get_api_extra_device_status for full-device status, for devices with only a single processor BFGMiner Version 3.3.0 - October 11, 2013 - openwrt: Optional libevent support - RPC: Add missing drivers to Device Code - bigpic_process_results: Cleanup - RPC: Use procs count for device summaries, rather than iterating over linked list (which may span multiple devices) - Bugfix: Use bfg_waddstr for cg_[mv]wprintw so special characters get interpreted properly - Bugfix: bitfury: Clear force_reinit flag after reinit - Bugfix: Use base unit for zero, and only if all values are zero - RPC: Always build pga* and proc* methods - Bugfix: icarus: Check for valid fd before all usage - Bugfix: Stratum initiate: Clear json var after freeing it, to avoid a potential double-free if retry fails before new JSON is parsed - Bugfix: Correct --log-file error message - Cleanly fall back to other micro- prefix symbols if locale doesn't support the preferred one(s) - Bugfix: bfg_waddstr: Missing break after selecting degrees symbol - Silence warning about (never really) uninitalised variable use in notifystatus - RPC: Complete split between devs/pga* and proc* methods - RPC: Internal restructuring to support device-wide view - RPC: Remove devdetail method, and rework newer devdetails to use its code - configure: Advise running ldconfig when detected and probably necessary - configure: Simplify final information summary - Bugfix: configure: Disable httpsrv/libevent if not available - README: Mention free GPU mining dependencies - Write config: Avoid writing default temperature settings - bitforce: Set default cutoff temperature to 85C for SC-class devices - When shutting down, don't wait for mining threads any longer after the 1 second sleep - bitfury: Silence warning about (never possible) uninitialised variable use - bigpic: Handle write failures - json_rpc_call_completed: Silence incorrect type cast warning - icarus: Silence warning about (never really) uninitalised variable use in icarus_scanhash - fpgautils: Check for fgets error - Silence warning about 
(never really) uninitalised variable use in multi_format_unit - ft232r: Silence warning about (never really) uninitalised variable use - Silence unused result warnings for notifier_{read,wake} - Log a warning if --cmd-* returns a non-zero exit code - configure: Update bigpic driver dependency on bitfury code - metabank: Initialise --temp-cutoff to 50C - README.ASIC: Document special care needed for some bitfury-based miners - Bugfix: bitfury: Correct results from RPC pgaset - bitfury: Move Slot and fasync RPC info to details instead of status - bitfury: Include chip fasync in RPC status - bfsb: Split up processors among a separate device per board - Bugfix: bitfury: Copy rxbufs to stack in case we need to do SPI communication in the meantime - bfsb: Merge bfsb_detect_chips into bfsb_autodetect (unchanged) - bfsb/metabank: Allow pgaset to change osc6_bits and SPI baud rate - bitfury: Fix code indentation - bitfury: bitfury_init_oldbuf: Optimise during runtime - metabank: Remove unused variables - bitfury: Send a work with lots of nonces to help cold-started bitfurys fill a static buffer - Bugfix: configure: Show --enable-bfsb/metabank in help, since they are disabled by default - metabank: Reduce i2c banking to only when necessary - bfsb: Only build spi_bfsb_select_bank if bfsb driver is being compiled - bitfury: Reorganize polling to hit chips sequentially, so SPI traffic can be minimised - bitfury: spi_emit_data: Return address read data will be located at after txrx - bitfury: After 8 bad nonces in a sample period, reinit immediately rather than waiting for the remaining up-to-0x38 - bitfury: Reinitialise chips if their active nonce stops changing - bitfury: Recalibrate immediately when we know we need it - bitfury: Reset chips if more than 8 hw errors are found in a 0x40 result sample period - bitfury: If previous nonce mismatch persists, try recalibrating oldbuf - bfsb: Shutdown chip when disabling - bfsb: Expose Clock Bits and Slot to RPC - configure: Simplify dynclock necessity detection - configure: Tie libudev usage to fpgautils - configure: Simplify fpgautils necessity detection - DevAPI: add_cgpu_slave for more elegant multi-device threads - Use procs count for device summaries, rather than iterating over linked list (which may span multiple devices) - metabank: Split up processors among a separate device per board - metabank: Merge metabank_detect_chips into metabank_autodetect (unchanged) - Removed temperature output from metabank_api_extra_device_status(). - Modified code to store temperature at cgpu->temp for metabank devices. - bitfury: Added get_api_extra_device_status for 'devs' request in metabank driver: Slot, Clock Bits, Temperature, Voltage. - minerloop_async: Run watchdog code within actual device thread - bitfury: Remove unused libbitfury_readHashData - Bugfix: DevAPI: Don't call job_process_results when there was no previous job - bigpic: Convert to async minerloop - bitfury: Port to Windows - bigpic: Use bitfury_fudge_nonce - Use common bitfury_decnonce for all bitfury-based devices - Rename bf1 driver to bigpic, as the same device has other brands too - bf1: Clean up log messages - bf1: Reduce loglevel of debug messages - Bugfix: bf1: Add missing header to Makefile.am, and fix .dname/.name - Bugfix: bf1: Fix warnings - BF1 driver modified to work under Windows -> packing of structs isn't working with Windows - Corrected hash rate estimation for BF1. Only 756 out of 1024 nonces are scanned. 
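
The BF1 hashrate correction above is simple arithmetic: if the chip only covers 756 of every 1024 nonce values, a completed nonce range is worth 2^32 * 756/1024 hashes rather than the full 2^32. Illustrative calculation only; the value matches the 0xbd000000 figure mentioned elsewhere in this log:

    #include <stdint.h>

    static uint64_t example_bf1_hashes_per_range(void)
    {
        /* 2^32 / 1024 * 756 == 0xBD000000 == 3170893824 */
        return (UINT64_C(1) << 32) / 1024 * 756;
    }
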
- Cleaning up the bf1 driver code - BF1 driver working - Bitfury BF1 source files added - bfsb: modified to use LukeJr:'s new code - configure: Reorder output - configure: Allow to build *fury drivers only - bitfury: Turn commented debug stuff into #ifdef BITFURY_SENDHASHDATA_DEBUG - bitfury: Implement queue_full to ensure all processors have a work ready before scanwork - bfsb: set api speed to 625khz - initial support for bitfurystrikesback boards - bitfury: LINE_LEN instead of 2048 - bitfury: 4Mhz SPI by default - bitfury: double SPI polling - bitfury: +Strange Counter -printf Counter - bitfury: tuning of parameters; fixed cycles calculation - bitfury: Move clock increase from common code to metabank driver init - bitfury: Add driver-bitfury.h for shared function declarations - bitfury: Do debug logging of read data before rotation - bitfury: Decode nonce array sooner to make debugging easier - bitfury: Check that the previous nonce still matches, to detect response corruption - bitfury: Workaround corruption by looking for matches rather than changes - bitfury: Rewrite using async minerloop (currently only setup on metabank driver) - bitfury: Fix memory issues - littlefury: Turn off chips when exiting - littlefury: Adapt to 16-bit payload size (protocol change) - Bugfix: littlefury: Fix bitfury_do_packet - bitfury: Report bad nonces properly - bitfury: Unify common nonce fudging code - Bugfix: bitfury: Chips only scan 0xbd000000 nonces per work - bitfury: Fix logging to use applog - bitfury: Split driver into bitfury_gpio (bare GPIO) and metabank (i2c banked GPIO) - littlefury: Use bitfury driver scanwork - bitfury: Eliminate more static variables - bitfury: Treat each chip as its own processor - bitfury: Resolve devices[chip] only once per chip - bitfury: Move second_run logic back to libbitfury - bitfury: Loop over chips once during scanwork - bitfury: Abstract hashes_done2 which keeps track of time deltas per thr on its own - littlefury: Need to set tv_morework - bitfury: Abstract out payload_to_atrvec - littlefury: Log read return value when unexpected - bitfury: Eliminate non-const global variables - littlefury: Safeguard on job switching - Bugfix: littlefury: Keep reading until error, EOF, or buffer filled - littlefury: Log devproto of incomplete reads - Enable littlefury detection - Bugfix: configure: Enable bitfury by default properly - bitfury: Require explicit -S bitfury:auto to probe GPIO-based SPI - bitfury: Move i2c slot handling to metabank-specific driver code - littlefury: Initial driver for BitCentury's USB miner - bitfury: Split actual chip detection into simple function - Bugfix: bitfury: Fix driver "name" to be correct length - bitfury: Abstract SPI interface - Bugfix: bitfury: Fix more warnings - Bugfix: bitfury: Fix warnings - bitfury: Intercept and use applog for perror calls - Bugfix: bitfury: Handle SPI init failure cleanly - bitfury: major intermediate update - bitfury: added chip series detection; multiple chip mining - Bitfury ASIC initial support - DynClk: Improve commented documentation - Replace Utility with (expected) Income calculated by actual shares submitted in 100% PPS value - format_unit3: BTC formatting with 2 decimal place digits - format_unit3: Support for nano- and pico- sizes - format_unit3: Use an enum for float-precision parameter - format_unit2: Support milli- and micro- unit prefixes - opencl: Disable by default if other devices are found; to enable, use -S opencl:auto - write_config: Save request-diff option - Stratum: Clear unused 
extranonce2 space - Don't even show 'Attempting to restart' for devices that don't support it - Workaround bug in PDCurses wresize - Bugfix: Include config.h in sha2.c first - make-release: Include libevent-2-0-5.dll in Windows packages - README: Document dependency on libevent - README: Document new --chroot-dir and --setuid options - Bugfix: Use correct configure define for chroot - Remove --disable-chroot build option: always compile --chroot-dir if supported - Bugfix: Use correct configure define for pwd.h - Improvements on code - Update miner.c - Added basic chroot support, added option to configure.ac. - Updated miner.c - Added basic chroot support - Replace u-hashrate with nonce-based hashrate adjusted for rejects/stales - SSM: Windows port - SSM: Allow configuring stratum port via --stratum-port option and RPC setconfig - SSM: Implement mining.hashes_done extension - Proxy: Catch invalid usernames and error - SSM: Report hashes done based on share submissions - SSM: Include current time in job ids to avoid false hardware errors due to job id reuse - SSM: If no notify is currently set, try to set it before refusing a subscribe - SSM: Prune old jobs after expiry - SSM: Use pool data read lock when subdividing notify - SSM: Gracefully fail when upstream stratum notify cannot be subdivided - SSM: Gracefully fail when upstream pool is not stratum (by closing subscribed clients, and refusing to subscribe new ones) - SSM: Properly fail cleanly when maximum clients are connected - SSM: Clean up stratumsrv_job when pruning it - SSM: Avoid responding to notifications, and give an error for unknown methods - SSM: Propagate work updates to clients - Mostly functional stratum proxy driver - Stratum: Split actual work generation away from the current pool data - Bugfix: Stratum: Dereference pool swork coinbase buffer inside data lock - SGW: Split proxy code out from driver-getwork into driver-proxy - Bugfix: miner.php: Check $dototal[$name] is set before comparing its value - Bugfix: RPC: Use bad_nonces in Hardware% instead of generic hw_errors - Bugfix: RPC: Handle LIFE_DEAD2 case - Make failure to open sharelog or noncelog abort startup - Nonce logging option --noncelog to simply store every nonce and its info - Abstract --sharelog option parsing BFGMiner Version 3.2.1 - September 19, 2013 - Only show long-poll message in pool summary if it's not using stratum. 
- README.ASIC: Clarify syntax of --scan-serial usage for USB Erupters - Bugfix: RPC: Defer allocation of apisock until after we check for --api-listen - make-release: Only try to include libmicrohttpd if bfgminer.exe depends on it - Bugfix: make-release: Include libplibc-1.dll if available - SGW: For Windows builds, include winsock2.h instead of POSIX networking headers - configure: Display getwork proxy server support in summary - Bugfix: SGW: Pass actual cgpu_info to prune_worklog_thread instead of silently casting one from getwork_client - Bugfix: Get total_staged with lock for TS stat, before getting console lock - Bugfix: bitforce: Correct fanmode RPC help - bitforce: Hide fan control when disabled in firmware - Bugfix: bitforce: Correct fanspeed TUI setting - Bugfix: logging: Allow up to 4 KB for log lines - Bugfix: icarus: Ensure last2_work exists before trying to check nonces fit it - README.OpenWrt: Include serialusb drivers - README: Include OpenWrt serialusb driver package names - Bugfix: Initialise notifier (as invalid) for no-thread devices (SGW) - Bugfix: Free temporary kernel path copy when writing config file - Bugfix: Put kernel path on the (main) stack after initialisation from commandline/config, to avoid appending an argv or jansson string - Bugfix: Always allow startup with curses enabled (since the user can use Manage devices to add new ones, and display Ctrl-C for text-only quit help - Bugfix: Ignore/reject libmicrohttpd before 0.9.5, which introduced symbols we need - README: Elaborate on format of BW - Bugfix: Try to initialise libusb later, so any mutexes applog might need are initialised - Bugfix: Implicitly initialise timer_set_now when it is first called - util: Eliminate unsafe const-removing casts - configure: Cleanup CFLAGS/LDFLAGS display - Show RT_LIBS in ./configure output. BFGMiner Version 3.2.0 - August 29, 2013 - cpu: sse2_64: Rename sha256_init to sha256_init_sse2 to avoid conflict with new sha2.c - httpsrv: Some older versions of libmicrohttpd need stdint.h included first - make-release: Include libmicrohttpd-10.dll if it exists - Fixes column alignment in decimal fields. Workaround for printf rounding up when formatting decimals into limited width. - New hidden --unittest option. No longer runs unit tests at startup by default, for faster startup. Added unit test for width printing of decimal numbers. 
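
The decimal-width entries above address a subtle printf behaviour: a value just under a column boundary can round up and overflow the intended field width, misaligning columns. A tiny demonstration of the underlying problem (not the workaround itself):

    #include <stdio.h>

    int main(void)
    {
        /* Intended 4-character column, but 9.9996 rounds up to "10.00" (5 chars) */
        printf("[%4.2f]\n", 9.9996);
        printf("[%4.2f]\n", 9.4900);
        return 0;
    }
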
- Bugfix: configure: More fixing BSD sed syntax for curses header search - Bugfix: configure: More fixing BSD sed syntax for libusb header search - README: Update solo mining docs - erupter: Continue searching a job until the end, even if an earlier result is found - icarus: Do hwerror-triggered reopen before sending the next job the first time, to avoid having to resend it later - icarus: Avoid sending a new job if the nonce found was for one before the current job anyway - icarus: Double-buffer work to solve nonces found before work change takes effect - icarus: Abstract nonce processing - Bugfix: icarus: Avoid trying to submit 0 on comms error - icarus: Skip unnecessary nonce memcpy - icarus: Make state->last_work a pointer and store it on the heap - miner.php fix missing global - Bugfix: erupter: After identify, skip starting work if next scanhash is already decided to be a "first run" (eg, device errors) - Bugfix: erupter: Fix identify following hw error - Bugfix: icarus: Don't try to send work if device open failed - Debug log when zeroing stats - Upgrade libblkmaker to 0.3.2 - Bugfix: GBT: Advertise coinbasevalue capability - Bugfix: Always compile add_serial now that it's somewhat generic - icarus: Avoid sending a new job if the nonce found was for one before the current job anyway - icarus: Double-buffer work to solve nonces found before work change takes effect - icarus: Abstract nonce processing - Bugfix: icarus: Avoid trying to submit 0 on comms error - icarus: Skip unnecessary nonce memcpy - icarus: Make state->last_work a pointer and store it on the heap - README.RPC: Remove usbstats mention - README: Add FAQ regarding cgminer messing up drivers - README.RPC: Fix miner name - README.RPC: Correct to mention other supported devices for pgaidentify - api.c fix mcast debug message bug - README.RPC: break all lines at most 80 characters - Update the API Multicast documentation - miner.php implement API Multicast handling to automatically find your local net miners - Bugfix: RPC: Fix log format types in io_flush - Set RT_LIBS correctly from autoconf detection - Explicitly check for clock_nanosleep and only use it when available - Further integrate cgsleep API into BFGMiner's timer system, so clock_nanosleep is only used on platforms with CLOCK_MONOTONIC - Convert cgtimer_t to struct timeval - Bugfix: Fix BSD sed syntax for curses header search - Bugfix: configure: Really fix BSD sed syntax for libusb header search - README.RPC: Mention multicast detection - README: Include --mcast-* options in usage - Use ccan's standard char* set/show functions for --api-mcast-{addr,code} - Bugfix: RPC: Use the same mcast code in reply, and log it correctly - API mcast only reply to remote IP's that are allowed access - Initial API Multicast response v0.1 to find cgminer APIs - Check for cnx_needed on each loop through wait_lp_current. - Return positive for cnx_needed when no_work is true. - Add no_work bool to set when we are in an underrun situation - Reorder support names alphabetically. - We don't want to continue into the hash_pop function if the getq is frozen. - Carve out the unused portions of sha2 implementation. - Import Aaron D. Gifford's fast sha256 implementation. - Use cloned work when finding avalon results since another thread can discard the work item while it's in use. - Provide a variant of find_work_bymidstate that returns a clone of the found work. - Use timespecs on windows as cgtimer_t to capitalise on the higher resolution clock changes. 
- Abstract out the conversion of system time to an lldiv_t in decimicroseconds. - Use our own gettimeofday implementation on windows for it to be consistent across ming builds and higher resolution. - Provide cgtimer_sub helper functions. - Provide cgtimer_to_ms helper functions. - Rename cgsleep_prepare_r as cgtimer_time to get time in cgtimer_t format and call cgsleep_prepare_r as a macro for cgtimer_time - TimeBeginPeriod and TimeEndPeriod do not add significant overhead when run the entire time for cgminer so avoid trying to maintain balanced numbers of them for specific time calls to simplify code. - Replace all references to the old n*sleep functions with the equivalent cgsleep_*s replacements. - timeGetTime uses huge resources on windows so revert to using timevals for its implementation of cgtimer_t - Quotient/remainder error in ms division. - Provide cgtimer_to_timeval helper functions. - Provide a timeval_to_cgtime helper function to reuse values. - Simplify cgsleep code for windows by using a typedef for cgtimer_t that resolves to clock resolution, using that internally. - On windows use the higher accuracy timegettime function to really get 1ms clock and timer accuracy. - Fix missed endtimeperiod in overrun timer on windows. - Make cgsleep_us_r take an int64_t for us. - Make the cgsleep functions build on windows. - Set high resolution timing on windows within the cgsleep functions. - Provide reentrant versions of cgsleep functions to allow start time to be set separately from the beginning of the actual sleep, allowing scheduling delays to be counted in the sleep. - Make the nmsleep and nusleep functions use the new cgsleep functions internally till functions are migrated to the new cgsleep API. - Add a ms_to_timespec helper function, and create a cgsleep_ms function that uses absolute timers with clock_nanosleep to avoid overruns. - Add rt lib linkage to enable use of clock_nanosleep functions with older glibc. - Add a timeraddspec helper function. - Provide a us_to_timespec helper function. - Provide a us_to_timeval helper function. - Add helper functions to convert timespec to timeval and vice versa. - Bugfix: SGW: Discard work from log only by expiry, so post-startup hardware errors are truly only hardware errors - bitforce: Make voltages available to RPC - bitforce: Save voltages as array of longs internally - Bugfix: Clear device bad nonces when zeroing stats - cpu & opencl: Defer RUNONCE to actual autodetection, so they can be added once after startup - Bugfix: Avoid crash activating [M]anage devices with no currently defined devices - TUI: Support for adding new devices using the plus key from [M]anage devices - Bugfix: Use add_serial function for scan_serial, so that "all" keyword works correctly - SGW: Add support for new X-Hashes-Done header to allow devices to more accurately report their work - SGW: Add X-Mining-Identifier header to inform devices what they are represented by in BFGMiner - SGW: Use JSON for 401 response; add Server HTTP header - AUTHORS: Add contributor Josh Lehan - Changed comparison constants to allow for floating-point rounding - Install README.ASIC for Block Erupter related drivers as well as Avalon - make-release: Windows users don't need README.Debian - README.OpenWrt: Import from BFGMiner downloads - Silence Windows warning about send data signedness - Fix block info - API/miner.php add some % fields - Don't yield on grabbing the read lock variant of cglocks. 
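
Several entries above introduce small time-conversion helpers (ms_to_timespec, us_to_timeval, and friends) used by the cgsleep/cgtimer rework. Plausible shapes for two of them, assuming the obvious semantics (the real signatures may differ):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/time.h>
    #include <time.h>

    static void example_ms_to_timespec(struct timespec *spec, int64_t ms)
    {
        lldiv_t d = lldiv(ms, 1000);
        spec->tv_sec = d.quot;
        spec->tv_nsec = d.rem * 1000000;
    }

    static void example_us_to_timeval(struct timeval *val, int64_t us)
    {
        lldiv_t d = lldiv(us, 1000000);
        val->tv_sec = d.quot;
        val->tv_usec = d.rem;
    }
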
- util.c expand quit to show file/func/line - We should be using a cg_wlock initially in generating stratum work before downgrading the lock. - Add the ability to downgrade a write variant of the cglocks. - Yield after releasing a lock in case we are on a device with limited CPU resources. - Fix --scrypt being required before scrypt intensities on command line or not working at all via config files. - Limit intensity range according to whether scrypt is in use or not. - Do not allow benchmark mode to be used with scrypt. - miner.php format Best Share - README.ASIC block erupter USB brief - Check for negative wait time in socket_full. - Adjust socket wait timeout in recv_line according to how long we've already waited to avoid a 60 second wait dropping to 1 second due to a blocked socket. - force type checking on curses - logging - size check sprintf - Only use length-counted variants of format_unit and percentf - Implement snprintf-like versions of format_unit and percentf - miner - size check all sprintf - size check get_datestamp/get_timestamp and remove unused cgpu->init - make all statline overflow safe - Convert the decay_time function into one that truly creates an exponentially decaying average over opt_log_interval. - GPU fan rpm display 9999 when it overflows - Change mode on python file. - Only update hashmeter if we have done hashes or haven't updated longer than the log interval, fixing a us/ms error. - README.ASIC: Document usage with Block Erupter Blades - README: Add mention of libmicrohttpd to dependencies - SGW: Include hash1 in work - SGW: Include application/json Content-Type header - Fix build without libmicrohttpd - Allow startup to proceed with no devices, as long as RPC or HTTP are listening - SGW: Refuse to issue new work to disabled devices - SGW: Respond with stale rejection if share is known to be immediately stale - Expose HTTP getwork username in Manage TUI and devdetails RPC - Add --cmd-idle notification command for REST/WAIT conditions - bfgminer-rpc: Accept unlimited size replies - api-example.py: Accept unlimited size replies - RPC: Rewrite io_data to cleanly handle unlimited sized responses - Configure options to build --without-libmicrohttpd support - RPC: setconfig can now change http-port (enabling or disabling the http service included) - Embedded HTTP server to handle getwork-based mining devices - inc_hw_errors2 function can handle a bad nonce without a known work - Split up scan_serial function to enable internally adding ad-hoc cgpu - Helper functions bfg_json_obj_string and share __json_array_string - bytes_t: Add bytes_shift and bytes_nullterminate - Fix configure help for --without-sensors - README: Document --with-system-libblkmaker - Bugfix: Use BSD-friendly sed syntax for libusb header search - Bugfix: Handle bitstreams properly - Bugfix: Skip search for addwstr since it breaks --with-curses=preference - Bugfix: configure: Find correct curses include path without *-config - README.FPGA: Further clarify ZTEX setup - Display a friendly error directing to README.FPGA when bitstream cannot be loaded - Remove bitstreams from BFGMiner distribution entirely, and include pointers where to find them in README.FPGA - ztex: Use load_bitstream_bytes for .bin files - fpgautils: load_bitstream_bytes support for Intel HEX format (.ihx) files - Add missing 16-bit byteswap macros - fpgautils: load_bitstream_bytes function to load a bitstream into a bytes_t - bytes_t functions: bytes_init, bytes_append, bytes_reset - Look for bitstreams in 
/usr/share/bitstreams - Disable Unicode support by default (use --unicode to enable) - make-release: Skip stripping debug info from Windows EXEs, for now - Bugfix: Ensure work variable is assigned before checking its thr_id - Bugfix: notifier_init (Windows): setsockopt needs an int for SO_REUSEADDR - Bugfix: Avoid turning totals red just because a processor is idle - Enable notifications for sick/dead with --cmd-sick and --cmd-dead options that execute commands when the event occurs - RPC: Add cpu enable/disable/restart - windows-build: Update to use libpdcursesw.dll - Ensure socket error messages are used for socket errors on Windows - fpgautils: Attempt to use Linux advisory locks on serial devices - Bugfix: README: --temp-cutoff sets the maximum temperature before cutoff, not temperature that triggers cutoff - Bugfix: Avoid incrementing dev_thermal_cutoff_count when just updating timestamp on status - Detect curses support for wide characters during configure - configure: Check for more variants of curses library names - Bugfix: ztex: Avoid trying to format non-libusb error with libusb error name - Bugfix: Avoid trying to assign const use_unicode variable with --no-unicode option - Remove long-unused opt_time variable - RPC: Avoid exposing Coinbase-Sig when it isn't supported - Bugfix: Fix build with libblkmaker < 0.2 - Attempt stratum mining.suggest_target before mining.subscribe, if --request-diff is used - Retry stratum if initiation fails for any reason after we have sent something (assuming there is more older variants we can try) - Bugfix: Restore delay for authorization on stratum mining.get_transactions - Bugfix: Fix TUI-only build - bitforce: Extend pgaset _cmd to variable-length commands - Bugfix: Use red for total processor count - bitforce: Undocumented _cmd1 pgaset for experimenting with firmware - Bugfix: curses: use_default_colors() when possible - Bugfix: Calculate scrolling range with new cursor lines - Bugfix: Ensure use_unicode and have_unicode_degrees constants are defined for curses-less builds - Bugfix: Replace block_timeval with (time_t) block_time - README: Update for --no-unicode option BFGMiner Version 3.1.4 - August 2, 2013 - Windows: Rebuild pdcurses with UTF-8 and wide character support - Bugfix: Avoid using wide curses symbols/macros when USE_UNICODE is not defined - Unicode: Use line drawing in TUI Help - Use bfg_waddstr even with Unicode disabled, since it's needed for red highlight - Colour bad conditions in red - Unicode: Cross-tee intersecting lines - Unicode: Use WACS_VLINE for vertical lines - Unicode: If degrees symbol is available, add it to temperatures - Unicode: bfg_waddstr wrapper to handle non-ASCII characters, currently used only by logging and statlines - Unicode: Use WACS_HLINE for horizontal lines - Add framework for using Unicode in TUI (can be disabled with --no-unicode) - Avoid using potentially locale-dependent ctype functions in locale-independent contexts - Refactor temperature in TUI statlines to share code nicer - Bugfix: avalon: Fix applog formatting - Bugfix: Align totals columns in per-processor view - Bugfix: Fix curses-less build - configure: Workaround buggy autoconf versions - Bugfix: erupter: Include headers in order necessary for Windows - Bugfix: Reimplement get_intrange using strtol instead of sscanf (which is broken on Windows) - Bugfix: get_intrange: Check for extra garbage at the end, only after we know we have an end-position - Bugfix: Fix Enter key in TUI on Windows - erupter: Split identify-handling logic into 
handle_identify function - Bugfix: erupter: Ensure identify is handled during no-once or firstrun - erupter: After identify, check if a work restart is needed immediately - erupter: Implement identify function by pausing hashing for 3 seconds - Bugfix: icarus: Remember firstrun state in case it gets changed for the next run - icarus: Move actual dynclock updates to icarus_job_start - icarus: Split out icarus_job_prepare, and rename icarus_job_start - Bugfix: ZeroStats: Reset column widths to 1 - miner.php: Include max temperature in device totals line - Bugfix: Stratum Fix debug logging of initial mining.subscribe command - Bugfix: Call pool_set_opaque from work_decode, so block content hiding/ providing messages work for getwork/GBT - Split block contents hiding/providing notices out from stratum code - Add test suite for get_intrange - Bugfix: Check for error conditions in get_intrange to not have weird --device behaviour when bad values are provided - Bugfix: erupter: Take advantage of detectone_meta_info to handle Emerald autodetection - TUI Help describing the various status fields (contributed by midnightmagic) - Bugfix: ManageTUI: Allow 'I' key to be used by devices not supporting identify - Bugfix: Prefer Sapphire over Emerald for -S erupter:* - Bugfix: Clear total_bad_nonces when zeroing statistics - Bugfix: modminer: Since we are not searching iManuf string for needles, only look for "ModMiner" - Bugfix: sysfs autodetect: Recurse into tty/ subdirectory (necessary for CDC/ACM ttys) - sysfs autodetect: Split tty* directory search into new _sysfs_find_tty function - modminer: Reduce default clock to 190 MHz - README: Update driver info to include Erupter driver - README: FAQ about scrypt and difficulty - Include count of working devices/processors in totals statline - Format totals statline the same way as individual device/processor statlines - Rearrange TUI a bit, including menu at the top (+1 log line) and hashrate total closer to device summaries - Bugfix: setup_stratum_curl: Need to release stratum lock on connection failure too - Bugfix: Avoid unnecessary locks inside curses_print_status, which is called with the console lock held - Bugfix: setup_stratum_curl: Hold stratum lock until connection completes, to avoid potential races - Bugfix: stratum_works: If stratum is already active, it works (avoid trying to initialise it again) - Replace hashrate_to_bufstr/ti_hashrate_bufstr with format_unit/ multi_format_unit_array - New multi_format_unit_array to fill multiple buffers instead of building a delimited string - multi_format_unit: Skip recounting length of fixed-length strings - Shrink status line to fit in 80 columns - Add network bandwidth rate to TUI - New multi_format_unit variadic macro to handle formatting multiple numbers at once - format_unit: Option to choose 3-digit integer display vs 5-character floating- point display - Optimization: format_unit: Handle number first, to avoid having to restore suffix later - Generalise hashrate_pick_unit/hashrate_to_bufstr into pick_unit/format_unit - Extend hashrate_pick_unit/hashrate_to_bufstr to handle sub-kilo units - Split total_bytes_xfer to total_bytes_rcvd and total_bytes_sent - Bugfix: _decode_udev_enc_dup: Allocate enough space for full string - Bugfix: Never use waddstr for logwin, since it would bypass special newline handling - Bugfix: bitforce: Set kname on chip processors - bitforce: Include voltages in Manage device TUI - Defer newlines going to curses logwin, to avoid a useless blank line at the bottom of the 
window - Ensure printing to logwin always goes through _wlog - Remove blank line above log window - bitforce: Identify parallel queue protocol distinctly from mere bulk queue - ManageTUI: Include kernel name, when available - Stratum: Roll ntime as we generate work - Stratum: Make swork.ntime native-endian - Stratum: Treat ntime as uint32_t (as it should be), still always big endian - Debuglog ManageTUI actions/responses - ManageTUI: Add generic Identify support - Bugfix: Move serial_detect* and open_bitstream to DevAPI code so CPU/OpenCL can build properly without fpgautils - Short-circuit logging sooner in quiet mode - Write to both stderr and console within same console lock "session" - Bugfix: Also hold the console lock when writing to stderr - Use common console locking function for stdout in logging.c - Move console lock and unlock functions (which also handle thread cancelstate) to miner.h - Bugfix: bitforce: Only try to clear queues of SC devices, since FPGA MR boards interpret ZQX/ZOX differently - Timer-based gettimeofday substitute for systems with poor time-of-day clocks (Windows) - Use clock_gettime(CLOCK_MONOTONIC) for timers when available - Use QueryPerformanceCounter for timers on Windows - Generic refactoring for timer_set_now - Replace all remaining uses of gettimeofday for timers, with timer_set_now (aka cgtime) - Don't mix timers with timestamps (visual only) - Always use struct timeval for timers, and don't mix timers with timestamps (functional only) - get_datestamp: Change timeval parameter to time_t, and implement get_now_datestamp for common "current time" use case - Use get_datestamp for (non-microsecond) log timestamps - Bugfix: ztex: Allocate final processor names on the heap, so they survive when the stack for ztex_prepare is gone - Bugfix: ztex: Copy serial number to device "name" before cloning it for other processors - Bugfix: x6500: Use cgpu->temp directly since there is only one sensor per processor - Bugfix: Actually show the highest temperature, not just calculate it - x6500: Allow changing clock speed from TUI Manage device - x6500: Port pgaset clock from modminer driver at 66d2a3ab072fcdbc3c7ed41a97f265afa917bbee - modminer: Allow changing clock speed from TUI Manage device - bitforce: Flush job and result queues at startup to avoid unnecessary warnings - x6500: Reduce default clock to 190 MHz - Bugfix: fpgautils: Close libusb handle after copying USB strings - use BSD sed syntax to generate iospeed_local.h BFGMiner Version 3.1.3 - July 11, 2013 - Bugfix: Reset staged_full flag when discarding (stale) popped work, or increasing the queue minimum - Bugfix: Only trigger staged work underrun if a mining thread was actually waiting for it (and do so sooner, before it has the work made) - bytes_cpy: avoid malloc and memcpy when size is zero - fix infinite loop in bytes_cpy when size is zero - Bugfix: Generate iospeeds_local.h based on termios.h defines, and only try to use POSIX standard if that fails BFGMiner Version 3.1.2 - July 8, 2013 - When not compiling with optimizations, initialize unused nonce2 space to avoid warnings from memory checking tools - TUI Manage devices: Support PgUp/PgDn keys to skip over processors within the same device - Bugfix: bitforce: Prefer 2nd temperature if higher than 1st - When displaying device summary statlines, use the highest temperature reported by any processor - Stratum: Fix nonce2 sizes greater than 4 and (on big-endian) smaller than 4 - bitforce: Manage TUI: Display both temperatures (if two), and enable changing 
fan speed - opencl: Add fan speed to Manage device TUI now that it's been removed from statline - DevAPI: Remove old statline APIs entirely, and add new override_statline_temp (used by modminer/x6500 for upload %) - README: Update statlines - TUI: Replace DevAPI statline_before with a predefined temperature column to free up statline space - Refactor and simplify bin2hex to speed up and avoid unnecessary heap use - stratum: Refactor work generation to do hex2bin conversions once, rather than every single header generated - Implement bytes_t for generic binary data storage (including smart realloc- based resize) - Bugfix: fpgautils: Only try to change baud rate when requested - x6500: Provide manuf/product/serial to cgpu interface - ztex: Provide manuf/product/serial to cgpu interface - erupter: Use baud 115200 by default - List valid baud rates once in iospeeds.h and standardize conversions - TUI: Display device manufacturer/product/serial in Manage device screen, when available - DevAPI: Store manufacturer/product/serial for each device - fpgautils: detectone_meta_info to provide metainformation (manufacturer, product, serial) on devices to detectone functions - Bugfix: fpgautils: Close product string file from sysfs (autodetect) - erupter: New icarus-based driver to handle autodetection of Block Erupter devices - Add --log-file option which redirects stderr to a file, but valid anywhere in the commandline or config file - Detect staged work underruns and increase queue to avoid them - Rewrite hex2bin to perform much faster (reduces minirig CPU usage by more than half!) - README: Add condensed list of dependencies - Enable "maintainer mode" by default - Bugfix: opencl: TUI manage: "Change settings" must not be compiled in with no-ADL builds - Bugfix: Detect whether the linker accepts -zorigin before attempting to use it - opencl: ADL: ADL_Adapter_ID_Get fails with newer drivers, so tolerate its failure best we can - opencl: Don't try to use BFI_INT patching with APP-SDK newer than 1084 (Catalyst 13.1), since it doesn't work - fpgautils: Elaborate that bitstream open failures are probably due to missing bitstream package - fpgautils: s/firmware/bitstream/ - Bugfix: Cleanup handling of complete device/driver failure - Deprecate -C (enable CPU) and -G (disable GPU) options, now that -S drv:[no]auto can be used for the same purposes - Bugfix: Since at least one of unix (or __APPLE__) or WIN32 is required by util.h, make sure unix is defined if WIN32 is not - Bugfix: Set ELF rpath for bundled libblkmaker to use $ORIGIN so it can be run from other directories - Bugfix: Cleanup needs to happen before printing the final quit message, or it gets lost in TUI mode - Bugfix: fpgautils: Initialize my_dev_t instances with null bytes, to ensure random unused data cannot influence hash keys - opencl: ManageTUI: Clear log cleanly for changing settings - Remove "GPU management" TUI entirely - opencl: Use new "Manage device" interface to do everything "GPU management" used to be used for - DevAPI: Add interface for drivers to define custom "Manage device" options - DevAPI: New function called to display additional processor information for "Manage devices" - TUI: Add enable/disable commands to device management - TUI: Implement beginnings of generic device management interface - Bugfix: avalon: Fix LIFE_INIT2 setting - Add LIFE_INIT2 status (safe to call functions, but not mining yet) for devices that want to report initialization status in their statline - Bugfix: modminer: Only program once for 
--force-dev-init - Bugfix: x6500: Only program once for --force-dev-init - fpgautils: Workaround and document Xcode clang bug - Bugfix: avalon: Correctly claim serial port - Bugfix: -S all: Mac OS X needs to probe /dev/cu.*, not just /dev/cu.usb* - cpu & opencl: Refuse to detect more than once - cpu & opencl: Respect scan-serial auto/noauto instructions - ft232r & libztex: Skip probe of claimed devices - fpgautils: Check for devices being claimed before calling detectone from autodetectors - x6500 & ztex: Claim USB devices - fpgautils: Implement bfg_claim_usb for claiming devices by USB bus number and address - fpgautils: Replace serial_claim with bfg_claim_serial using a more cleanly extensible interface and implementation - fpgautils: serial_claim: Include a bus enum in hash key - Add serial port claiming logic to avalon, bitforce, and modminer drivers - RPC: "devscan" command to probe for new devices - New (internal) scan_serial function to probe for new devices at runtime - Split out per-cgpu temperature configuration code to load_temp_config_cgpu - DevAPI: Modify add_cgpu to use temporary devices_new array, so detection can be done without touching live variables - Move more cgpu initialization to allocate_cgpu - Move devtype default assignment to allocate_cgpu - Move cgpu startup routine to new start_cgpu function - Move cgpu_info allocation to new allocate_cgpu function - Move *.drv_detect calls to a new drv_detect_all function - DevAPI: add_cgpu: There is no need to hold mutexes while creating devices - Bugfix: cpu: Update device "kernel name" with auto-selected algorithm - usbtest: Improve portability to at least 2.7 and 3.2 - usbtest: Avoid messing up the display by escaping weird bytes via repr() - usbtest: Skip last 2 optional parameters, since we use the defaults and they are not in older versions of pyserial - Bugfix: bitforce: ZOX limits results to 16 results per call, so repeat ZOX until there are fewer - Bugfix: Initialization for bfgtls needs to be done in each thread - Bugfix: stratum: Be patient with stratum lines that come in slower than we can process them - Use bfg_strerror in locations previously just logging raw error numbers - Bugfix: stratum: Log WSAGetLastError() for error number on recv failures on Windows - Use bfg_strerror where it is already needed (for thread-safety) - New thread-safe bfg_strerror function to portably stringify error codes - Bugfix: bitforce_queue: Initialize buf2 so errors don't cause the work queue to flush - TUI: Display percentage invalid of found nonces with hw errors - Bugfix: modminer & x6500: Increment *->diff1 for all bad nonces - percentf2 that takes t as precalculated total - Keep track of bad nonces independently from generic hw errors - inc_hw_errors: Resolve cgpu outside of mutex - Use inc_hw_errors function at every site which increases hw_errors BFGMiner Version 3.1.1 - June 22, 2013 - stratum: Deliver exact socket-error within the debug error message - Don't install docs for (compile-time) disabled functionality - Bugfix: Handle make dependencies on subdirectory files properly - Bugfix: Use EXTRA_*_DEPENDENCIES for Cygwin workaround, to fix program make dependencies - Support new overclocking speeds for avalon: 325, 350 and 375 - Bugfix: logging: Since we are inlining snprintf, stdio.h is needed - Bugfix: serial_autodetect_ftdi: Debuglog FTDI COM port mappings returned, fix type of FT_HANDLE - Bugfix: Allow starting non-libusb devices if libusb_init fails - Bugfix: Add missing newline to libusb_init failure message - 
Bugfix: opencl: Remove unnecessary casts from rot() macro, which created type issues - Bugfix: Remove unused variables - Suspend stratum connections when we know they've failed and don't try to recv data from them once the socket no longer exists. - applog/quit fix GPU errors created - logging remove extra added - remove varargs from logging/quit/in general as much as possible - compile unix code on Mac OS X fixes not finding the config file in $HOME - Create a pool_localgen bool function for testing when a pool can generate work locally. - Use mining start time for device MH/U calculations - Bugfix: Save start time for stats to correct "Elapsed" key on "stats" RPC request - modminer: tidy up free in device detect function - bitforce: RPC pgaset fanmode 9 for auto fan control - Bugfix: usbtest: Correct obvious typos - Initial import of usbtest.py script - Include microseconds in log output with new --log-microseconds option - bitforce: Workaround chip ids not necessarily being in order by choosing processor count based on expected chip ids rather than parallelization - serial_autodetect_ftdi: Debuglog FTDI COM port mappings returned - Bugfix: On stratum disconnect, clear stratum_active and stratum_notify atomically along with sock - Windows: Use backtrace.dll to print usable backtraces to stderr on crash - Bugfix: bitforce: parallelized: Properly handle parallelized protocol with only 1 chip - Bugfix: bitforce: XLINK: Increment boardno when moving on to the next board - bitforce: XLINK: Update to use actual length,xlinkid header order - Bugfix: bitforce: XLINK: Avoid trying to send 0 bytes after each write - Bugfix: opencl: Build fpgautils even if OpenCL is the only driver, now that it uses it for kernel-finding - Bugfix: Do not try to call get_stats or get_statline* if device is still initializing - Bugfix: opencl: Add missing include for fpgautils.h (needed for open_bitstream) BFGMiner Version 3.1.0 - June 13, 2013 - Bugfix: openwrt: Expect fixed bitstream path for input - Improve Makefile organization and fix "make install" - icarus: Upgrade work division autodetection probe to be faster and also detect 8 core - Calculate rejection percentage based on weighed shares, not absolute counts - Count weighed discarded (stale) shares per cgpu - Bugfix: Cleanly count discarded (stale) shares without overlapping devices/sshare locks within clear_stratum_shares - configure: Enable Avalon support by default now that it behaves reasonably - avalon: Since detection is not really implemented, only probe devices when the driver is specified to -S by name (eg, "avalon:/dev/ttyUSB0") - Bugfix: bitforce_queue: Never try to reinitialize a slave processor - Bugfix: bitforce_queue: Use work_list_del everywhere we remove from work_list to ensure work gets freed properly when done - Reduce HW error logging to debug level, so it doesn't clutter the TUI log by default - DevAPI: When a device has more than 26 processors, represent them as aa-zz - bitforce: bitforce: Fix TUI display of slave processors on parallelized boards - bitforce: Only display temperature in TUI for the first chip on parallelized boards - Bugfix: bitforce: Set temperature for all processors on the board - Bugfix: bitforce_queue: Initialize proc->device_data to board data for parallelized chip processors - Bugfix: bitforce_queue: Defer nonce count check until after thiswork has been identified (or handled as NULL) - avalon: avalon_clear_readbuf can simply wait for a single read timeout on Windows to avoid select - avalon: Simplify 
avalon_get_result by using avalon_gets - avalon: Go back to good old serial timeouts for gets, since select() is for sockets (only, on Windows) - Updated api.c to return the hashrate with 3 decimal places - Change hashrate display to never display 3 fractional digits, because it looks a bit ugly - bitforce: Credit hashrate to the correct chip on parallelized devices - Re-set work thr_id on share submissions, to ensure the result gets credited to the correct device in case of shared job queues (as in BitForce long boards) - bitforce: Turn parallelization into separate logical processors for more details on each, including working with XLink - bitforce_queue: Implement job sanity checks using new "ZqX" for devices using parallelization - bitforce_queue: Minimal support for parallelization - Add --device-protocol-dump option to debuglog low-level bitforce protocol details - When shutting down, set work restart flag (and trigger notifier) to help mining threads escape to their main minerloop (and check for shutdown) - Document and check for uthash version 1.9.2+ - Bugfix: Don't report failure for graceful mining thread shutdown - Name devices in mining thread failures - Warn about killing mining threads - Bugfix: Wake up mining threads when asking them to shutdown - Disable pthread cancel within curses locking - Shorten the avalon statline to fit in the curses interface and show the lowest speed fan cooling the asic devices. - Change switch_compact function name to switch_logsize to be used for other changes. - Only adjust cursor positions with curses locked. - devs display - fix GPU duplicate bug - basic copyright statement in API.java - Change the --device parameter parsing and configuration to accept ranges and comma separated values. - Modify scrypt kernel message. - Check for pool_disabled in wait_lp_current - Check for pool enabled in cnx_needed. - Add README.ASIC to debian packaging and make-release - Document avalon options in ASIC-README - Create README.ASIC with basic summary of supported ASIC devices. - Do avalon driver detection last as it will try to claim any similar device and they are not reliably detected. - Set the fanspeed to the nominal chosen for GPUs. - Clamp initial GPU fanspeed to within user specified range. - Avalon fan factor is already multiplied into the info values. - Get rid of zeros which corrupt display. - Logic fail on minimum fanspeed reporting. - Provide a workaround for fan0 sensor not being used on avalon and pad fan RPM with zeros. - Add ambient temp and lowest fan RPM information to avalon statline. - Display max temperature and fanspeed data for avalon. - Set devices to disabled after they exit the hashing loops to prevent the watchdog thread from trying to act on them. - Scanhash functions perform driver shutdown so don't repeat it. - Change the opencl shutdown sequence. - Send the shutdown message to threads and do the thread shutdown functions before more forcefully sending pthread_cancel to threads. - Icarus report data direction with comms errors - Execute driver shutdown sequence during kill_work. - Provide an nusleep equivalent function to nmsleep. - Set avalon_info to device data void struct. - Make submit_nonce return a bool for whether it's a valid share or not. - Do a non-blocking read of anything in the avalon buffer after opening the device. - Assign the avalon info data to the device_data in cgpu_info. 
- Rename cgpu_data to use new device_data - miner.h remove unused device_file and add device_data - Must unlock curses as well in logwin_update. - icarus report usb write error information - Make mining threads report out during work submission. - submit_work_async is no longer used directly by driver code. - Create a logwin_update function which mandatorily updates the logwin and use it when input is expected to prevent display refresh delays. - All stratum calls to recv_line are serialised from the one place so there is no need to use locking around recv(). - Only allow the mining thread to be cancelled when it is not within driver code, making for cleaner shutdown and allowing us to pthread_join the miner threads on kill_work(). - Implement pthread_testcancel replacement for BIONIC - Attribute whatever stats we can get on untracked stratum shares based on current pool diff. - Downgrade OpenCL headers to 1.0, which work fine for our purposes and are more compatible - icarus: If work_division autodetect fails, just use the old default of 2 - avalonhost-raminst script to help with installing to RAM on Avalon-host routers - Attempt to probe /dev/cu.usb* for fallback "-S all" - openwrt: Download uthash dependency - Bugfix: openwrt: Always build with libsensors support disabled - configure: Check for uthash headers - Bugfix: ztex: Only destroy libztex device after the last handle to it has been released - ztex: Remove libztex slave device interface, simply passing fpgaNum to selectFpga - Bugfix: cpu: Fix yasm and sse2 detection - cpu: Check for SSE2 support independently from yasm - Bugfix: cpu: Make sure to link libsse2cpuminer.a before x86_32/libx8632.a - Bugfix: cpu: Only build libsse2cpuminer iff yasm is available and targetting x86_32 - Bugfix: Free work only after deleting it from list - Remove embedded uthash.h and utlist.h from Makefile - windows-build.txt: Update for system uthash - Remove embedded uthash (and add dependency on system uthash) - Replace elist.h with utlist.h - Bugfix: Fix build with CPU mining and *without* yasm - cpu: Be explicit about size of sha256_init address - cpu: Add --algo fastauto (new default) to detect a usable algorithm without taking over a minute - cpu: Default to --algo auto - cpu: Support all platform-applicable assembly algorithms, even if used CFLAGS don't support them - Ubuntu: Updated changelog, added scrypt support. - cpu: Set fixed symbol names for stuff shared with assembly - cpu: Create Mach-O asm binaries on Darwin-based systems - Bugfix: cpu: Use COFF yasm binfmt on Cygwin - Bugfix: cpu: Get correct nonce from data, where the CPU sub-drivers leave it - Remove redundant "Reject ratio" in exit-time summary - Apply "R+S(%)" formatting to long-form statistics - Group stale shares in with rejects (but still distinctly counted) and make the percentage be (reject+stale)/total - Include rejected shares as a percentage - Move Utility and Best Share to status line - Remove LW from status line, since it is basically useless - ztex: Clean up a lot of ugly casting - Bugfix: Correctly avoid SIGPIPE on Mac - Make set_work_target a function to set a specified char as target for use elsewhere. - Minor typo. - Support more shares to be returned for scrypt mining. - Set all stratum sockets to nonblocking to avoid trying to use MSG_DONTWAIT on windows. - Only use MSG_NOSIGNAL for !win32 since it doesn't exist on windows. - Use MSG_NOSIGNAL on stratum send() - Set TCP_NODELAY for !linux for raw sockets. 
- Use TCP_NODELAY with raw sockets if !opt_delaynet - Recheck select succeeds on EWOULDBLOCK for stratum. - Don't use TCP_NODELAY if opt_delaynet is enabled with stratum. - Fix warnings in avalon driver. - correct applog typing - Simplify the many lines passed as API data in the avalon driver now that the API does not need persistent storage for the name. - Duplicate the name string always in api_add_data_full to not need persistent storage for names passed to it. - Add extra matching work count data in API for Avalon with 4 modules. - Clean up summary slightly better on exit. - opencl: Disable using binary kernels on Apple by default - Use sock_blocks in api.c - Fix build and distdir. - compile on win32 - Update README.scrypt with improved hashrates for 7970. - Use copy_time helper throughout miner.c - Provide wrappers for commonly used timer routines with API stats. - Use flip32 function instead of open coding it in gen_stratum_work. - Move util.c exports to util.h - Replace gettimeofday usage with cgtime - Adopt gettimeofday wrapper from cgminer (cgtime) that is always called with tz set to NULL and increases the resolution on windows. - Add high resolution to nmsleep wrapper on windows. - Bugfix: Export stats_lock for deviceapi - Set default ocl work size for scrypt to 256. - filter out the wrong result from adjust fan code - Set last device valid work on adding device. - Make scrypt submission use the submit_nonce code, with nonces matching endianness. - Increment hardware error count from the one site. - compile avalon driver on win32 and win64 - build out of source dir - Rename scrypt regenhash function for consistency. - Add Mac FAQ. - Further driver FAQs. - Check for work restart after disable in the hash queued work loop since it may be a long time before we re-enable a device. - Unconditionally test for many wrong results on avalon and reset to avoid passing a corrupt avalon result to temperature code. - Only reset an avalon device with no results when there are no results consecutively. - More FAQs. - Avoid applog in recalloc_sock. - Avoid applog under cg_wlock. - Put spacing around locking code for clarity. - Avoid applog under pool_lock. - Avoid more recursive locks. - Avoid applog while ch_lock is held. - Avoid recursive locks in fill_queue. - Variable is already initialised in global scope. - More GPU FAQs. - More README faqs. - Yet more README faqs. - Add more FAQs to README. - Wrap result wrong tests in avalon scanhash in unlikely() and only consider a hash count of zero wrong if a restart wasn't issued. - avalon: if result_wrong >= get_work_count, jump out of the read loop - Fix warning on 32bit. - fix the fan control on max temp2/3 - if for some reason the network is down, one simple bfgminer command: "bfgminer -o 127.0.0.1:8888 -O fa:ke --avalon-options 115200:32:10:50:256" can idle the avalon for safe power and protect the chip - if hash_count == 0, reinit avalon to fix the 0MHS bug; use the max value of temp1 and temp2 for fan control - Reinstate the matching_work_count per subdevice on avalon based on the work subid. - Rationalise and simplify the share diff and block solve detection to a common site. - subid field for devices that do not yet support the distinct device/processor interface - Make the avalon array size a macro. - Use replacement of work items in the avalon buffer as needed instead of flushing them. - Reinstate wrong work count to reset avalon regardless and display number of wrong results. - select() on serial usb in avalon does not work properly with zero timeout.
- Use no timeout on further reads in avalon_gets - Do sequential reads in avalon_get_reset to cope with partial reads. - Show read discrepancy in avalon_get_reset. - Reuse avalon_get_work_count variable. - Check for AVA_GETS_RESTART when deciding if avalon has messed up. - Make the detection of all wrong results on avalon much more conservative to avoid false positives on work restarts. - Show error codes on select and read fail in avalon. - If we get a restart message in avalon_gets still check if there's a receive message to parse first without a timeout before returning AVA_GETS_RESTART. - avalon_gets is always called from the one call site so inline it. - The read_count is unused by the avalon get result code and no longer required for avalon reset so simplify code removing it. - Use a separate avalon_get_reset function for resetting avalon instead of using avalon_get_result. - The current hash count returned by avalon scanhash is just an obfuscated utility counter so make it explicit. - Check for a restart before a timeout in message parsing code in avalon. - We should check for a restart message before checking for a timeout in avalon scanhash. - Store the subid for the work item in avalon. - Fix record_temp_fan function in avalon driver. - Remove inappropriate memset of struct avalon result which was corrupting fan values. - Only do_avalon_close once on multiple errors. - Reset the result_wrong count on block change in avalon scanhash to prevent false positives for all nonces failed. - Small timeouts on select() instead of instant timeout increase reliability of socket reads and writes. - Rotate the avalon work array and free work on AVA_SEND_BUFFER_EMPTY as well. - Only get extra work in fill_queue if we don't have any unqueued work in the list. - Don't get any work if our queue is already full in avalon_fill. - Free avalon->works in the event we call avalon_prepare on failure to initialise. - Fix warnings. - Create an array of 4 lots of work for avalon and cycle through them. - Remove unused per unit matching work count for avalon. - Rename the confusing avalon_info pointer. - Simplify avalon scanhash code using the new find_queued_work_bymidstate function. - Members of cgpu_info for avalon are not meant to be in the union. - Use correct struct device_drv for avalon_drv. - Check enough work is queued before queueing more in avalon_fill. - Actually put the work in the avalon queue. - Rename avalon_api to avalon_drv. - First draft of port of avalon driver to new cgminer queued infrastructure. - Minor README updates. - README.GPU: Properly warn about overclocking damage - Add example 7970 tuning for scrypt in readme. - Update driver recommendations. - Add extensive GPU FAQs for the flood of new Scrypt miners. - Bugfix: Expect bitstreams and kernels to be in the srcdir, not build dir - cpu: Prefer sse4_64 algorithm if supported - cpu: sse2_32: Force bare symbols for cross-asm/C symbols - Compile CPU mining for win32 and win64 - configure: Check for pthread in -lwinpthread - Use has_pth flag instead of trying to mess with pthread internals - configure: Explicitly check for nanosleep - configure: Include "no" response in BFG_PTHREAD_FLAG_CHECK - miner.h missing extern - Update links and recommended SDKs. - Bugfix: README.GPU: Fix some typos - Update README to match changes to display. - Remove increasingly irrelevant discarded work from status lines. - Remove increasingly irrelevant GW value from status. - README.GPU: Correct terminology - Update README about intensity. 
- Bugfix: Restore always autodetecting stratum as non-scrypt - icarus: Replace default of 2 work_division/fpga_count with autodetection of 1, 2, or 4 - Update scrypt readme with newer information and to match changes in code. - Set default GPU threads to 1 for scrypt. - Connect backup stratum pools if the primary pool cannot deliver work. - Use a new algorithm for choosing a thread concurrency when none or no shader value is specified for scrypt. - Do not round up the bufsize to the maximum allocable with scrypt. - Remove the rounding-up of the scrypt padbuffer which was not effectual and counter-productive on devices with lots of ram, limiting thread concurrencies and intensities. - Make pool adding while running asynchronous, using the pool test thread functionality. - Only curl easy cleanup a stratum curl if it exists. - Add intermediate variants of cglocks that can be up or downgraded to read or write locks and use them for stratum work generation. - Move the stratum data to be protected under a new cg_lock data_lock. - Convert the ch_lock to cg_lock. - Convert the control_lock to a cg_lock. - Remove unused qd_lock. - Implement cg_lock write biased rwlocks. - Don't start testing any pools with the watchpool thread if any of the test threads are still active. - Set sockd to false should curl setup fail on stratum. - Reopen the socket whenever we're retrying stratum. - Set pool died on failed testing to allow idle flag and time to be set. - Remove unused pthread_t typedefs from struct pool. - Perform pool_resus on all pools that are found alive with the test pool threads. - Use pool_unworkable in select_balanced as well. - Differentiate pool_unusable from pool_unworkable. - Keep a connection open on higher priority stratum pools to fail back to them. - Set the wrong bool in pool_active - Only bypass unusable pools if they're not the selected ones. - Find the first usable pool in preference to the current pool in select_pool for work. - Add a pool_unusable function which checks if a pool is stratum but not active to use within switch_pools. - API no longer ignore send() status - API make the main socket non-static - Start the stratum thread only if we successfully init and authorise it, otherwise unset the init flag. - Make the initialisation of the stratum thread more robust allowing the watchpool thread safe access to it after the stratum thread is started. - Shorten the time before keepalive probes are sent out and how frequently they're sent with stratum curls. - Display select return value on select fail in stratum thread. - Clear the socket of anything in the receive buffer if we're going to retry connecting. - Perform pool resus on pools that were not set as the initial pool at startup. - Allow pools to be resuscitated on first startup by the watchpool thread. - Check all pools simultaneously at startup switching to the first alive one to speed up startup. - Close any sockets opened if we fail to initiate stratum but have opened the socket. - API use control_lock when switching pools - Clear last pool work on switching pools if the current pool supports local work generation or we are in failover only mode. - make rw locks: mining_thr_lock and devices_lock - work queues - remove new but unnecessary functions - functions for handling work queues - find_work() to find work in devices work queue - Add a get_queued function for devices to use to retrieve work items from the queued hashtable. 
- Add the choice of hash loop to the device driver, defaulting to hash_sole_work if none is specified. - Add a driver specific flush_work for queued devices that may have work items already queued to abort working on them on the device and discard them. - Flush queued work on a restart from the hash database and discard the work structs. - Create a central point for removal of work items completed by queued device drivers. - Create a fill_queue function that creates hashtables of as many work items as is required by the device driver till it flags the queue full. - Create the hash queued work variant for use with devices that are fast enough to require a queue. - Make sure to do full avalon_init if the device_fd is invalid. - Document extra zero byte in avalon_reset. - Microoptimise likely paths in avalon_gets. - Make sure to set timeout to 100ms instead of 1ms in avalon read loop for select. - Make sure to get time of first response in avalon read loop. - Use select for a reliable timeout in avalon read and don't read 1 byte at a time, optimising read loop. - We should break on the loop on a work restart in avalon, and only consider all errors if we actually have gotten some results. - Avalon init on comms error as well. - Reinit avalon device in case of FPGA controller mess up. - Increase reliability of avalon startup by only opening and resetting the device once, looking for the id sequence offset by one byte as well, and still ignoring if it's wrong, assuming it is an avalon. - Nest avalon_decode functions to avoid doing unnecessary lookups once we have found the nonce. - Use htole32 wrapper for nonce encoding in avalon. - Remove unused rev8 function from avalon driver. - Remove const qualifier from driver structs - rename device_api -> device_drv and all related api -> drv - rename get_proc_by_id() to get_devices() - Wrap access to devices array under a mutex - Provide wrappers for grabbing of thr value under the mining_thr_lock. - mutex all access to mining_thr - Split thr_info array into control_thr and mining_thr pointers so more mining threads can be added later - Update the hashmeter at most 5 times per second. - Speed up watchdog interval and therefore display updates to 2 seconds. - Add README.GPU to EXTRA_DIST. - Split out the GPU specific information from the README into a README.GPU file. - Update docs and reorder README to show executive summary near top. - Add more FAQs about crossfire. - Convert error getting device IDs in ocl code to info log level only since multiple platforms may be installed and the error is harmless there. - Unnecessary extra array in ocl code. - Cope with the highest opencl platform not having usable devices. - Update kernel file names signifying changes. - Use constants from the array of __constants throughout the diablo kernel. - Create a __constant array for use within diablo kernel. - Use global constant arrays for all other constants used in scrypt kernel. - Use global __constants for sha functions in scrypt kernel. - Use constants for endian swap macros. - Revise scrypt kernel copyright notice. - Separate out additions in scrypt kernel. - Reuse some Vals[] variables that can be assigned to constants earlier in the poclbm kernel, making for fewer ops. - Put all constants used in poclbm kernel into __const memory array to speed up concurrent reads on the wavefront. 
- opencl: Support for reading temperature from free software radeon drivers via libsensors BFGMiner Version 3.0.3 - June 13, 2013 - make-release: Include all submodules, recursively - Remove API.java example (no copyright license) - Minimally fix "make install" to ignore bitstream sources - Add submodule for ZtexBTCMiner (aka ztex_ufm1_*) - Add submodule for X6000_ztex_comm4 (aka x6500-overclocker-0402) - ztex: Use standard file header/comment formatting, and update license to GPLv3 - bitforce: Allow a longer timeout (1250ms) for ZCX (Device Information) commands - Bugfix: pdcurses doesn't like changing logwin size without clearing it also, so do that in display options - -S all: Start QueryDosDevices probe with 256-byte buffer - Use common code to ensure the prefix of -S *:all remains in all implementations of it (/dev glob was removing prefixes) - bitforce_queue: Ensure comma following nonce count is there, to error cleanly - bitforce: Report queue send failures, and count as hw errors - Bugfix: bitforce_queue: Don't try to send ready-to-queue work to device, when there is no ready-to-queue work - Bugfix: bitforce: Clear want_to_send_queue flag when flushing queue, since we can't send an empty queue - bitforce: Include new total queued count in flush debugging - Bugfix: bitforce_queue: Implement a minimum wait time of 10ms - README: Document serial device format for Mac OS X - Bugfix: cairnsmore1: Enable building with libudev autodetection even if only icarus drivers are enabled - Bugfix: sysfs autodetect: Continue searching even after finding one tty (fixes multiple ttys per device, such as some Cairnsmore1s) - Bugfix: ztex: Avoid destroying libztex device in case other processors are still in use (fixes crash when 1.15y becomes unavailable) - Update windows-build.txt - ccan: Add missing copyright comment headers - Remove obsolete mknsis.sh - Add missing copyright sections to files that may need them - Standard copyright format (including year) for adl_functions.h - Bugfix: When disabling device, ensure its prev_work (if any) gets freed properly - Check stratum socket exists and is writable before even considering whether the sessionid is the same or not - Bugfix: Check that the stratum_share struct for a failed submission is still in the submission hashtable before trying to delete it - README: Add missing documentation for CPU algorithms cryptopp_asm32, sse2_32, and altivec_4way - Bugfix: icarus: Check work restart before timeout - Bugfix: icarus: Debuglog the correct read timeout (and omit from work restart since there's no trivial way to get it) - README: Update links - Bugfix: cpu: Fix warning on Win64 - Bugfix: avalon: Strict formatting - Bugfix: Cleanup trivial warnings - Bugfix: bitforce: Seek to end of nonce counter to find nonces, in case there are more than 9 - Bugfix: Build hexdump.c into the project normally like everything else - Bugfix: Really fix device entries in saved config file - Update the write config to properly record device entries and remove disabled option. - avalon: Really fix applog formatting - va_copy is meant to be matched by a va_end in log_generic. - Further fix distdir for hexdump.c - Fulltest is true if value is <= target. - Fix warning with no curses built in. 
- Bugfix: configure: Check NEED_FPGAUTILS correctly - configure: Better grammar for --enable-cpumining help - Bugfix: Check for SSE 4.1 support before building sse4_64 asm CPU miner (uses MOVNTDQA instruction) - Bugfix: elist: Use uintptr_t for member offset - Bugfix: opencl/adl: Fix format string - Bugfix: opencl: Correct usage of formatted prints - Increase fd limits as much as possible at startup - Bugfix: bitforce: bulk queue: Cleanly retry for high temperature recovery - Fixed deps for raring, which has newer libudev1. - bitforce: debuglog actual result data - Bugfix: Missing 'else' can result in null pointer dereference in race - Minor grammo in avalon driver. - Make avalon temperature reading LOG_INFO level. - Fix the problem of seting up termio of ttyUSB0 for icarus. the CSIZE is the mask of CS2/4/8 - bufsize is an unsigned integer, make it so for debug. - Bugfix: bitforce: Include get_api_stats in BQUEUE mode - Bugfix: Always compile support for commandline --temp-target and --temp-cutoff, and write them in the config for all devices - Bugfix: Ensure cURL timers get set correctly in submission thread - Bugfix: modminer: Remove unused parameter to sprintf - Bugfix: modminer: Use correct format for bytes left in bitstream upload message - Bugfix: Access strategy name string directly instead of accidentally - Add printf-format syntax checks to more functions that should use it - AUTHORS: Add more contributors - Support configure flag --with-system-libblkmaker to allow building without the bundled copy - Bugfix: Use HTTP/1.1 compatible product token for User-Agent header BFGMiner Version 3.0.2 - April 28, 2013 - Receive failures in recv_line should unconditionally fail. - Use sock_blocks function for stratum send and receive. - Avoid applog under stratum_lock in __stratum_send. - Create an OS specific sock_blocks function. - There should be no error response code with return value 0 in recv_line. - Check for errors on stratum recv for any recv return value less than 1 and only parse the response if it's positive. - Avoid applog under stratum_lock in recv_line. BFGMiner Version 3.0.1 - April 24, 2013 - Bugfix: configure: Move actual roundl macro back to miner.h after math.h should be included - Bugfix: configure: Use dummy pointer to correctly detect roundl when conftest main is missing argc parameter - Bugfix: configure: Use variable argument to roundl to prevent compilers from optimizing it out entirely - Remove bitstreams from Windows binary distributions (README directs users to download source and copy them) - make-release: Remove autom4te.cache from distributed source - Bugfix: Omit --no-opencl-binaries option from build if OpenCL is not being compiled - Bugfix: Check that all pools have URIs set before starting - Bugfix: bitforce: Make noncebuf large enough for max qresults +1 (for OK line) - opencl: Ability to avoid using binary kernels with new --no-opencl-binaries option - README: Include jansson PKG_CONFIG_PATH in example for Mac - Include trailing \0 with coinbase sigs if there's room - Differentiate socket closed from socket error in recv_line. - Add new best share info to verbose logging. - Add notice for when network diff is changed. 
- convert sleep(const) to nmsleep() - Rename longpoll threads according to what pool they're associated with - miner.php report 'Last Valid Work' as time before request - API V1.25 - add 'Last Valid Work' time for each device - add 'count' to customsummarypage 'calc' - Bugfix: ztex: Initialize fw_buf pointer to NULL so a free before allocation is safe - Cleanup when stratum curl fails to initialise. - LTC text typo - Recreate cURL for new stratum connections, and clear stratum_notify on suspending them - clear_stratum_shares: Rename diff_stale variable to diff_cleared - MMQ it's a bitstream - Update a pool's last work time when the work is popped as well as staged. - Extend stratum connections another minute (total 2 minutes) after the last work item was staged, and maintain last_work_time for non-stratum pools. - Fix --benchmark generating valid work for cgminer. - Bugfix: Correct pdbuilder result directory - Omit add_serial_all code when serial support is not wanted - Use configure to detect presence of roundl to avoid redefining an actual function (possibly inline) - Bugfix: roundl: Add needed parenthesis to perform ?: before + - Bugfix: ft232r: Defer allocating structure until after USB endpoint is successfully opened, so it won't leak in case of failure - Bugfix: ztex: Free bitstream in memory when done with it - Bugfix: Safely handle all-space cURL debug messages, should they ever happen - Silence warnings about poor format usage for quit() - Apply noreturn and printf-format attributes to quit() function definition - Bugfix: set_serial_rts get flags to manipulate them correctly - Bugfix: Missing return for /dev globbing - Bugfix: Free unused work when retrying failed lp request - Display processor name with thread disabled/re-enabled messages - Move best share to top summary line, and add network difficulty to block line - opencl: Default to phatk kernel for Mesa platform - opencl: Default to single thread with Mesa OpenCL - opencl: Check for Mesa OpenCL and avoid using binary kernels with it - bitforce: Never increase bulkqueue poll wait time during queue underruns - bitforce: Start off polling bulk queue every 100ms - bitforce: Log device queue size after getting bulk results - bitforce: Ensure bulkqueue polling occurs at least once a second - opencl: Include OpenCL platform in kernel binary filenames - bitforce: Use bulk queue mode for all SC devices - Bugfix: bitforce: When reinitializing, free all known works to avoid decrementing reset queued counter - bitforce: Handle timeout during ZOX as cleanly as possible - Bugfix: bitforce: Make reinitialization more complete and safe - Bugfix: bitforce: Close opened fd if reinit fails - Bugfix: bitforce: Retry ZGX until device is NOT busy - bitforce: Log when zero queued results are received BFGMiner Version 3.0.0 - April 5, 2013 - Update libblkmaker to 0.3.0 - debian: Include new api-example.py in docs - added example for Python using the RPC API - added SPEC file for SUSE distributions - Bugfix: bitforce: Free initialization data to avoid trivial one-time memory leak - Support for local submission of found blocks (GBT only) - bitforce: RPC pgaset fanmode 0-5 for manual fan control - bitforce: More debugging information - Bugfix: modminer: Since RPC always includes the temperature, we don't need to add it specially - bitforce: Expose dual temperature sensors to RPC - bitforce: Support for up to 2 temperature sensors per processor - Bugfix: bitforce: BFP_QUEUE: Attempt to recover from extra queue results, or the next job finishing early 
- bitforce: Always send a new job ASAP after flushing the queue - bitforce: Implement "Queue Job Pack" (ZWX) and use it for XLINK devices to avoid USB latency issues - bitforce: Ignore INPROCESS added to ZOX response - Implement minerloop_queue for devices that process work items too fast to keep track of which one they're currently working on - bitforce: Split ZOX command into its own function - Bugfix: DevAPI: Free work when preparing it fails - DevAPI: Abstract get_and_prepare_work for minerloops - DevAPI: Move select() logic from minerloop_async to do_notifier_select - Clarify stratum mining.set_difficulty debug log message - No longer call configure from autogen.sh - Bugfix: bitforce: Ensure result_busy_polled gets set for queue mode to avoid unnecessary 10ms wait times - Bugfix: bitforce: Use common code for end of job_get_results, so queue results don't short-circuit timing code - Bugfix: bitforce: Ensure "OK" doesn't remain in queued results buffer - Bugfix: bitforce: next_line needs to increment beyond the newline character - Update README for x970 memdiff values. - Do not scan other gpu platforms if one is specified. - Update README for sync objects on windows. - Add information for setting gpu max alloc and sync parameters for windows with scrypt. - Whitelist AMD APP SDK 2.8 for diablo kernel. - Show pool number in switch message - Clear just the socket buffer when we don't care what is left in a stratum socket. - Clear the stratum socket whenever we are closing it since the buffer is going to be reused. - Do not continue work from a stratum pool where the connection has been interrupted. - Close any existing stratum socket if we are attempting to restart stratum so the pool knows the connection has gone. - Show mechanism of stratum interruption if select times out. - Make stratum connection interrupted message higher priority to be visible at normal logging levels. - API add 'Network Difficulty' to 'coin' - avalon: if all results are wrong in one batch read, reinit the avalon - avalon: record the last result temperature info - delay when closing avalon; only record matched results - avalon: fix no_matching_work to only count when debugging - avalon: minor change - avalon: add idle code - avalon: filter out temp_max >= 100, print the result for debug. - avalon: export more data to API stats - avalon: add default chip frequency - avalon: fix the work_i3 init - avalon: add reinit_device - avalon: base the temp_history_count on the timeout - avalon: fix mistake in adjust_fan - avalon.c: fix the copyright - bfgminer-rpc: add -o option: no format, only the result - avalon: update fan pwm - avalon: update the FAN_PWM MAX/MIN - avalon: minor change - avalon: overclock code - avalon: fix the display - avalon: minor change - avalon: fix the fan/temp control - avalon: fix the temp_avg - avalon: fix temp - avalon: add fan/temp control - avalon: add FAN speed factor - avalon: add TODO on fan/temp control.
cleanup detect - avalon: add the gate_miner bits - avalon: only send one byte on reset - avalon: add support for sending 2 bulk tasks at the beginning - avalon: fix the hash_count return - avalon: fix the LOG_WARNING - avalon: add comment on hash_count - avalon: WORKAROUND on hashrate - avalon: update max miner_num - avalon: add more info on api - avalon: add nonce_elf and more info on match miner_num - avalon: change reset to 300ms - avalon: move bulk buffer to its info structure - avalon: more work on hashrate and read_count - avalon: add baud 38400 support - avalon: fix nonce_range EB - avalon: fix the wrong hashrate - more info on avalon API - avalon: fix the nonce_range EL - avalon: fix the read count - avalon: more work on nonce_range - avalon: read() times and send delay fixed - avalon: add the send delay option - avalon: print out fan/temp info - avalon: add the result info (fan/temp etc) - avalon: more checks on hardware errors - avalon: more work on get_work_count - avalon: now we have dynamic get_work_count - avalon: more work on parameters - avalon: add timeout parameter - avalon: baud as parameter now - avalon: send work pitch should be (15*(8+2)*4/19200)s - avalon: more work on match work - avalon: fix free_work - avalon: continue on reset work; wait for buffer empty - avalon: add options; if write() errors, sleep(1) before reset() - avalon: more cleanup - avalon: finish read when Buffer empty - avalon: fix the nonce EB issue - avalon: MORE work - avalon: fix the EB/LB issue - avalon: some cleanup - avalon: fix the first configure task - more work on the avalon buffer - avalon: fix the BIG_ENDIAN issue - avalon: Fix the buffer status - change defines to avalon parameters - fix the cts return - avalon: change the data to uint8_t, add some test temp code - avalon: fix task init - avalon: more data format work - change to avalon data format - debug: add a debug hexdump.c - avalon: add some code on match work - avalon: try to correct the pool_status and dev_status - avalon: more work on multi-works - avalon: more work on read - avalon: more work on get results - more RTS code on avalon.c/h - more RTS code - avalon: some cleanup - avalon: more work on new work queue structure - fpgautils.c: use lancelot as target - avalon: since we submit tasks as bulk data, modify again - add scanhash_queue - rename avalon.h to driver-avalon.h - fpgautils.c: add get_serial_cts - understand the avalon protocol more - avalon: new software structure but targeting lancelot - add avalon.h - avalon: fix warning - avalon: add TODO comments - more AVALON defines - avalon: more work - add driver-avalon.c - add avalon support to automake - Default to work queue mode on BitForce SC devices - bitforce: Implement support for non-contiguous XLINK slave addressing - gnulib: stdint: fix build with Android's Bionic for x86 - gnulib: stdint: Improve support for Android. - gnulib: stdint: Add support for Android.
- Check for ?e##toh macros independently from hto?e## - If pthread_cancel is missing/emulated, set asynchronous thread cancel type on stage, watchdog, watchpool, and longpoll threads since the emulation cannot support deferred cancellation - If pthread_cancel is missing (Bionic/Android), emulate it using pthread_kill and pthread_exit - configure: Intelligently detect what flags/libs get us working pthread, and define HAVE_PTHREAD_CANCEL if pthread_cancel is available - Bugfix: Initialize mutex_request to invalid so devices that don't use it (bitforce) don't try to - RPC: pools: Add "Message" to show last client.show_message received over stratum - Stratum: Support client.show_message method - Don't retry without resume support, if the first attempt just timed out - Bugfix: minerloop_async: Intelligently handle work updates and device disables during transitions - Bugfix: minerloop_async: Free old (unused) prepared work when replacing it with an upgraded one - Bugfix: Free pool sessionid before replacing it - Bugfix: Stratum: Address dereference-after-free and memory leak introduced in resume support - Stratum: If old protocol fails as well, try to resume again next time around - Bugfix: Stratum: Only failover to old mining.subscribe protocol if the previous attempt was the new one (fixes a flood of retries) - Try to extract the sessionid associated with mining.notify on 3rd level array and submit it along with the userid to support mining resume, failing gracefully and restarting if the pool rejects it. - Cope with misread sessionid on stratum for now. - Use the sessionid as passed on stratum connect to attempt to resume a connection once and then clear it if it fails, to use a new connection. - Move to storing the nonce1 in the work struct instead of the sessionid for the now defunct first draft mining.resume protocol. - Only continue submitting shares with mining.resume support on stratum when the session id matches. - Provide support for mining.resume with stratum, currently re-authorising after successful resumption pending finalising of the protocol process. - Provide basic framework for restarting stratum depending on whether resume support exists or not. - Abstract out the setting up of the stratum curl socket. - Remove redundant setting of strings to NULL since the whole work struct is zeroed. - Only clear stratum shares mandatorily on stratum dropouts when the pool does not support resume. - Stratum: Keep trying to submit shares, even across reconnects - Use new select loop primitives in submission thread - Bugfix: Missing pool_no parameter to applog for no-stratum-sessionid debug message - Do as much outside of mutex locking of sshare_lock as possible. - Remove last reference to struct work used outside the sshare_lock in submit_work_thread - Unlock the sshare_lock in submit_work_thread when all references to work and sshare are complete. - Bugfix: Copy and free sessionid on work objects - Add timestamps to stratum_share structs as they're generated and copy the stratum sessionid if it exists to stratum work generated. - Store session id for stratum if the pool supports it for future mining.resume support. - Keep the unique id of each work item across copy_work to prevent multiple work items having the same id. 
- x6500: Never consider processors idle if they're enabled - x6500: Make mutex management cleaner by blocking device select loop during idle get_stats - Bugfix: minerloop_async: Always refer to real thread for select loop - Bugfix: Initialize work_restart_notifier[1] to INVSOCK instead of -1 to be portable - ztex: Use restart_wait to react quicker to work updates - Handy TIMEVAL_USECS macro - Restore blocking restart_wait function with nearly identical semantics as old one - Bugfix: bitforce: Rework sleep delay adjustment logic to properly deal with more accurate timing readings (added in device API update) - Hidden --force-rollntime option for getwork pools (use like --pool-priority, after each pool definition) - Include processor id in get_work logging - Support for BIP23 BPE request target extension via new --request-diff option - Hidden option to reduce "work update" messages to debug level: --quiet-work-updates - Change "work restart" to "work update" in messages to reflect reality more accurately (no work is lost), and normalize case of "longpoll" - HACK: Since get_work still blocks, reportin all processors dependent on this thread - Move FD_SETSIZE definition to configure so it affects everywhere it needs to - Move absolute_uri function to util.c - Remove now-unused blocking-wait code (restart_cond, restart_wait, and stale_wait) - Bugfix: bitforce: Zero hashes complete if we get an invalid response - HACK: Since get_work still blocks, reportout all processors dependent on this thread - bitforce: Support for work queue protocol on BitForce SC devices - Use new double-stage format for SC devices - modminer+x6500: Expose frequencies to API in terms of MHz to be consistent with ztex driver and cgminer - bitforce: Replace (bool)cgpu->nonce_range with (enum)bitforce_data->proto - bitforce: XLINK support for multiple processors - bitforce: Prepare log messages for XLINK by separating into proc and dev messages - bitforce: Always use fd/mutex pointers on actual device, to prepare for XLINK support - bitforce: Get fd/mutex pointers only once per function - bitforce: Abstract commands to bitforce_cmd1 (single-stage) and bitforce_cmd2 (double-stage) functions - bitforce: Debuglog device information during detection - Bugfix: Missing includes needed on Windows - Bugfix: Use waddstr instead of wprintw to display completed device summary line, so literal %s don't get interpreted as formatting options - Bugfix: bitforce: Avoid polling continuously between work restart and job completion - bitforce: Use poll device API when job_get_results needs to wait - bitforce: Use poll device API when job_start needs to wait - stale_work_future function to determine in advance if a work/share will be stale at some future time - bitforce: Minimally refactor to adapt to new minerloop_async - minerloop_async: Break out of select on work_restart_notifier - Replace UNIX-only work_restart_fd pipe with portable work_restart_notifier - Bugfix: Clean out unused variables from minerloop_async - Move new device API code to new deviceapi.c source file - Make minerloop_async more async, using some callbacks to handle event completions - Split part of minerloop_async into do_get_results, and a bit other reorganization - Abstract select_timeout function to convert a realtime timeval to a timeout pointer for select() - Split part of minerloop_async into do_process_results, and don't allow api->job_get_results to return hashes - Split part of minerloop_async into do_job_prepare and do_job_start - Initialize thr->tv_poll to -1 
(disabled) - Update the hashmeter one last time before disabling a device - minerloop_async: Break out of select for wakeup notifications - Replace mining thread queues (which were only used for wakeup pings) with notifiers (which can be used with select and co) - Unify all mining thread wakeup to mt_enable (simplifying code) - Bugfix: get_statline: Correct device summary status, only showing DEAD or OFF if it affects all processors - Working processor disable/enable with new async minerloop (currently gets stuck if all processors disabled) - Bugfix: get_statline: Only care about the processor status if --show-processors is set - Bugfix: watchdog: Use processor thr_info even if it isn't a real thread - Only support thread-per-device or N-threads-per-processor; simplify work_restart check - x6500: Remove mutex, since driver is single-threaded now - Bugfix: Update utility every get_statline call, and include every processor involved - HACKING: New text file to document the internal workings of (currently) the device API - Bugfix: mining_threads is now a total of thr_info objects, not necessarily actual running threads - x6500: Working (but incomplete) asynchronous/single-threaded driver - Incomplete (but workable) asynchronous minerloop - Core support for managing multiple processors from a single thread - Allow device drivers to implement their own minerloop - Move cgpu_info and thr_info initialization to main, and ensure all get initialized before starting any threads - Refactor and simplify miner_thread (no major behavioural changes) - Move difficulties to end of share result message, so they can be made to line up nicely - Bugfix: Consolidate share result message code (including fixing displayed hash portion for stratum) - miner.php: Include ProcID in Device column as a letter - Show summaries per-device unless --show-processors is used (also available on Display TUI menu) - Order next_proc linked list in processor id order - Consolidate processor summary line generation for TUI and text-only modes - RPC: Update to include ProcID so multiprocessor devices can be understood correctly - RPC: Common function for adding device-identifying fields - modminer: Make single-processor statline look like other temperature-only statlines - modminer: Split each FPGA into its own logical processor (in the same device still) - modminer: Get mutex pointer only once per function - ztex: Combine master+slave devices into a single multiprocessor device - Preformat dev_repr (device representation) and proc_repr (processor representation) once for use everywhere - x6500: Split each FPGA into its own logical processor (in the same device still) - x6500: Get mutex pointer only once per function - Minimal support for defining devices with multiple logical processors - Rename all README files to standard README.* style BFGMiner Version 2.10.6 - April 5, 2013 - Bugfix: Restore missing variable - Bugfix: openwrt: Never include _ in platform name - Bugfix: Fixed typo in bfgminer-rpc usage - pool_active: Ensure temporary curl is always cleaned up - Try to find jansson via pkg-config first, and fall back to checking system defaults if that fails - Attempt to find libjansson via pkg-config if AC_CHECK_LIB fails - Update scrypt readme re drivers and sdk. 
- Bugfix: README: Move --device out of GPU only options - Update .gitignore - Added bfgminer-rpc binary to .gitignore - Bugfix: Actually change to the newly selected pool when stratum is inactive and it decides to change - Bugfix: modminer: Properly fail on dynclock error - Bugfix: opencl: Clean pc_data->work before freeing pc_data - Bugfix: Correct order of libblkmaker libraries so static builds work - Bugfix: Need to ensure __BIG_ENDIAN__ is defined before including uthash.h - Bugfix: Stratum: When destroying cURL easy handle, be sure to clear pool stratum_curl pointer - Bugfix: bitforce: Fix warning - Bugfix: Stratum: Properly handle non-integer "id" for client.get_version requests - json_dumps_ANY utility function to portably implement json_dumps(..., ... | JSON_ENCODE_ANY) - Bugfix: bitforce: Free old name when updating it on reinitialization - Stratum: Include pool number in send/recv protocol logging - Include pool number in stratum thread name - API always report failed send() replies - API.java allow partial reads - Bugfix: Stratum: Use curl_easy_cleanup to close connection, so cURL understands what is going on - Bugfix: hash_pop: If a work should be rolled, use a clone of it rather than consume a rollable work - openwrt: Move Makefile into a bfgminer subdirectory to avoid symlinking issues - openwrt: Use --with-curses=ncurses to avoid ncursesw dependency - configure: Support --with-curses=FOO to look for curses implementation in libFOO - Set pool socket to INVSOCK after closing connection, just in case - Clean up compiler warnings - Bugfix: Check that pool is active one last time before selecting it - Bugfix: Trim whitespace (like newlines) off the end of debug info from libcurl - Bugfix: submit_nonce: Backup the original work->blk.nonce since the miner code uses it to track work consumption - Bugfix: Scheduler needs to unpause disabled devices, even if it isn't waking them up - Bugfix: Use SOCKETTYPE for notifiers, to avoid potential overflow on Win64 - Bugfix: Some versions of MinGW define localtime_r, but don't handle the timeval.tv_sec case that we use; so undef any preexisting one and use our own - Bugfix: reinit_gpu: Remember the selected device to correctly change properties of - Bugfix: cpu: reinit_device hasn't worked since 93b284d, so just remove it entirely instead of letting it screw with thread 0 - Document necessity to run ldconfig and possibly configure ld.so - Bugfix: Complete startup after just one pool is found active, no need to wait for the rest - Bugfix: Update links - miner.php: Replace PGA dev number with concatenated device ID - Bugfix: miner.php: Display devices with aligned columns instead of assuming they come out of the RPC aligned - Bugfix: miner.php: Silence PHP "local timezone" warning - Bugfix: api-example: Try to use BSD sockets on any non-Windows platform - Bugfix: stratum: Delay mining.get_transactions request until after auth has succeeded, so its failure doesn't abort the connection (also avoids any delay from a large result) - --no-getwork option to disable getwork protocol support - Clarify dependencies with Debian/Ubuntu package names BFGMiner Version 2.10.5 - February 8, 2013 - Bugfix: Actually increment template_nonce when we use it - Change file modes. - Fix logic fail on partial writes with stratum send that was leading to corrupt message submissions.
BFGMiner Version 2.10.4 - February 7, 2013 - New platform ports: OpenWrt and Win64 - Update official Windows build compiler and libraries: - - Upgrade GCC from 4.6.3 to 4.7.2 - - Upgrade libusbx from 1.0.10 to 1.0.14 - - Upgrade jansson from 2.3.1 to 2.4 - - Upgrade libcurl from 7.26.0 to 7.28.1 - - Upgrade pthreads-win32 from 2.8.0 to 2.9.1 - Bugfix: Release libudev handle when ID_MODEL doesn't match what we're looking for - openwrt: Script to build for multiple platforms easily - openwrt: Bitstreams should be "all" arch - Working OpenWrt Buildroot Makefile - Do not enable the pool disable on reject feature unless explicitly enabled with --disable-rejecting. - Check for calloc failure for completeness in gen_stratum_work. - Cache the coinbase length to speed up stratum work generation. - Cache the header length when generating stratum work to avoid calculating it on every work generation, and to only need one alloc+sprintf, speeding up work generation. - Use heap ram for coinbase in gen_stratum_work, zeroing it before use. - Provide a wrapper for aligning lengths of size_t to 4 byte boundaries. - Bugfix: ztex: While 1.15y can finish highspeed FPGA config immediately, at least 1.15x needs some delay - Use CURLOPT_OPENSOCKETFUNCTION to intercept the socket being created for stratum, in order to workaround CURLINFO_LASTSOCKET breakage on Win64 - make-release: Update for Win64 and bfgminer-rpc.exe - Use localtime_r instead of localtime, including a Windows implementation that handles Win64's broken struct timeval.tv_sec - Use standard execv arg type on Win64 - Bugfix: Correct various size mismatches - Ensure winsock2.h is always included before windows.h - Bugfix: Add necessary Winsock library to bfgminer-rpc linking - Bugfix: Remove dependencies of compat.h on miner.h for Windows (moves timersub/timeradd to compat.h where it belongs) - modminer: Raise default/maximum clocks to 210 and 250 respectively - modminer: Use better-performing X6500 overclocker bitstream - Disable libusb linkage/usage when neither X6500 nor ZTEX support is desired - Add support for "--scan-serial all" via simply globbing /dev - fpgautils: serial_autodetect implementation using sysfs - fpgautils: Unified serial_autodetect function to find a serial device regardless of the underlying method - fpgautils: Look for bitstreams in ../share/bfgminer/ too - Bugfix: Ensure curses library is always linked in NCURSES_LIBS, to avoid unnecessary dependencies for (non-curses) tools - Bugfix: GBT: work->data is always little-endian, but libblkmaker wants the nonce in native-endian - Bugfix: cpu: Corrections necessary to get 'c' and 'cryptopp' algorithms working on big endian - Bugfix: Sanity check for bits exponent in real_block_target - Bugfix: cpu: Increment nonce after checking (rather than before), to avoid skipping the first nonce of each scanhash call - cpu: via: Only swap back the nonce, rather than all data - cpu: Minor optimization by checking H==0 before calling fulltest - Bugfix: Skip yasm check when building for non-x86 platforms - Allow --scantime alias to --scan-time - Build bfgminer-rpc program from api-example.c - Bugfix: Remove miner.h include from api-example.c since it isn't needed and pulls in libblkmaker - Make wrapping consistent at 79-80 characters per line - Bugfix: Correct numerous misspellings, typos, etc - Bugfix: Prefer using a non-frozen mining thread for watchdog - Bugfix: x6500: Expose x6500_fpga_data even if JTAG reset/detect fail, since it is still used to store temperature info if the other FPGA 
initializes - Adding ZTEX Windows guide from Jason Snell BFGMiner Version 2.10.3 - January 22, 2013 - Revert "x6500: Whenever we get a hardware error, purge buffers just in case of read/write desync" - Bugfix: libblkmaker: Check that zero-padding on base58check input matches output (needed to properly reject addresses with too many or too few prefix/pad '1's) - Bugfix: Free bin2hex output in __update_block_title - Bugfix: Allocate space for the terminating null byte on new current_hash - Display tail end of prevblock hash rather than start+32bits - Try to extract block height from coinbase scriptSig, when mining stratum - Display next block height when using GBT - Use suffixes for target-difficulty also, in share accept/reject loglines - Bugfix: Implement common target_diff function, fixing scrypt-specific bugs in and simplifying common code shared by set_blockdiff, calc_diff, and share_diff - Set DISPLAY to :0 by default (on non-Windows) - Bugfix: Reset pool bytes received when zeroing stats - miner.php trim trailing zeros on some of the STATS numbers - Semi-Cherrypick: API stats - include pool network bytes + in miner.php - Best Share readme - API zero - zero statistics - all or bestshare - with optional on screen summary - api.c pgaenable not re-enabling the device - plus related debug - diffexactone pool diff1 used for share value calculation is ffffffff... not 100000000... :P - miner.php user/pass fix 'usr' is readonly - miner.php optional user/pass login restrictions - zero (most) API stats - Remember best share per pool and return in API pools - ztex: precheck the secondary solutions to avoid hw errors; the ztex bitstreams give back the latest checked nonce and its hash7 value and two possible solutions. - Bugfix: configure: if blocks require at least one command, so fill with true - Bugfix: Only log stratum resume if it was actually "idle" before - Zero the best share string memory when zeroing stats. - Change the pool stratum socket buffer to new cgminer implementation, to allocate it in a grow-only fashion and reduce virtual memory fragmentation at the expense of CPU time. - Differentiate socket full from sock full. - Allow stratum to start up without notify but check it is valid before creating stratum work. - Do not try to generate stratum work unless the notify command has succeeded. - Document Mac OS X configure usage with Homebrew pkg-config path - Clean up post-configure display of compile environment - Bugfix: If native ncurses detection fails, print "none?" result before moving on to try AC_SEARCH_LIBS scan - Fix more printf-format non-compatibilities - Update windows-build.txt BFGMiner Version 2.10.2 - December 27, 2012 - Update documentation to include block difficulty - Reset all stats when requested - Reset total diff1 shares when zeroing stats as well to show correct work utility. - Update documentation. - Parse anything in the stratum socket if it's full without waiting. Empty the socket even if a connection is not needed in case there are share returns. - Provide a mechanism to zero all the statistics from the menu. - Display the current pool diff in the status line. - Display block diff in status line. - Generalise the code for solving a block to enable block solve detection with scrypt mining. - Generate the output hash for scrypt as well and use the one function to set share_diff. - Use one size for scratchbuf as a macro in scrypt.c - Remove the unused sha224 functions. - Check staged_rollable under staged lock, when cloning available work.

BFGMiner Version 2.10.2 - December 27, 2012

- Update documentation to include block difficulty
- Reset all stats when requested
- Reset total diff1 shares when zeroing stats as well to show correct work utility.
- Update documentation.
- Parse anything in the stratum socket if it's full without waiting. Empty the socket even if a connection is not needed in case there are share returns.
- Provide a mechanism to zero all the statistics from the menu.
- Display the current pool diff in the status line.
- Display block diff in status line.
- Generalise the code for solving a block to enable block solve detection with scrypt mining.
- Generate the output hash for scrypt as well and use the one function to set share_diff.
- Use one size for scratchbuf as a macro in scrypt.c
- Remove the unused sha224 functions.
- Check staged_rollable under staged lock, when cloning available work.
- scrypt_diff uses a uint64_t as well.
- Correct target for stratum support with scrypt mining.
- Bugfix: Ensure nonces are put in data as little-endian in test_nonce*
- Add low-level debugging info for data_buffer (some only enabled with -DDEBUG_DATABUF)
- Make all_data_cb fwrite-compliant by returning nmembs, and check for unlikely overflows
- Bugfix: Need to do extract_sockaddr before trying to initiate stratum (erroneous http URI usage, except at startup)
- Bugfix: Update last GBT work in pool_active before staging it, since otherwise it could possibly be consumed before we copy it
- Bugfix: Address Windows-specific formatting issues (including lack of support for %ll*)
- Bugfix: ztex: Correct formatting for reset failure error
- ztex: Fix formatting in a debug message
- cairnsmore: Don't bother timing dynclock detection, since there's no standard way to log it accurately
- Correct formatting in FPGA drivers
- opencl/adl: Fix formatting to fit strict rules
- Explicitly cast all_data.buf to char* for debug printing
- Follow strict time_t handling rules
- Use GNU format-checking attribute when available for applog

BFGMiner Version 2.10.1 - December 21, 2012

- libztex: fixed a typo
- libztex: check returnvalue of libusb_claim_interface() and release the interface in case of early exit
- Bugfix: submissions: Skip FD_ISSET when fd==-1 (let the next select setup deal with cleaning them out)
- Bugfix: Remove sws from write_sws list when discarding it due to pre-send stratum disconnection
- Bugfix: Shutdown stratum socket when initiate fails, so it doesn't linger
- Bugfix: Clear stratum receive buffer when initializing, in case there was extra unprocessed data in it from a previous connection
- Stop all work from the current pool if it's a stratum pool once it is disconnected since it will be invalid upon reconnecting.
- Discard all staged work from stratum pools as well as the shares upon disconnection since all the work becomes invalid.
- Use correct cbreak after 15 second delay when no pool is found alive.
- modminer: Set default clock frequency to user request so it sticks better
- modminer: Make valid frequency range consistent: 2-230
- Allow stratum to work with scrypt.
- MMQ add api pgaset for clock
- API V1.23 - new pgaset command, to be used soon
- Protect the best_share/best_diff values under control lock.
- Bugfix: modminer: Return failure to change frequency when device reports it
- opencl: Look in the right place for OpenCL library on Mac OS X
- Bugfix: AC_C_BIGENDIAN is reported to have problems, and invasive even if buried in a conditional, so don't use it
- Bugfix: Check for bswap_* first, to avoid redefinition based on other variants
- Bugfix: autoheader isn't smart enough to figure out variable defines, so use AH_TEMPLATE for each possible header
- Check a stratum pool hasn't gone dead while being a backup pool and missed having its idle flag cleared.
- Fix null pointer issue when one chip on an X6500 is not initialized yet when reading temperature.
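The 2.10.2 entry above about using the GNU format-checking attribute for applog refers to GCC's format(printf, ...) function attribute, which lets the compiler verify applog's printf-style arguments at build time. A minimal sketch (the macro name is illustrative, not necessarily the one the source uses):

    #ifdef __GNUC__
    #define FORMAT_CHECK(fmtpos, argpos) __attribute__((format(printf, fmtpos, argpos)))
    #else
    #define FORMAT_CHECK(fmtpos, argpos)
    #endif

    /* prio is argument 1, the format string is argument 2, varargs start at 3 */
    extern void applog(int prio, const char *fmt, ...) FORMAT_CHECK(2, 3);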
- Hot-patch broken libcurl pkgconfig CFLAGS found in libcurl's Windows binaries - Update OpenCL 1.2 headers from http://www.khronos.org/registry/cl/api/1.2/ - Reorganize detection of platform byteswap macros and endian to be more robust using autoconf - Move new bandwidth-based Efficiency to status line - Replace work-based efficiency with new bandwidth-based efficiency - Bugfix: Pull out GBT request collapsing since it is no longer needed with new get_work main loop - Bugfix: Free unused work when waiting on external GBT request - README: Explicitly mention automake dependency - README: Update AMD APP SDK URIs - Bugfix: Free shares discarded before beginning submission - Bugfix: Discard stratum shares waiting for a writable socket, if the pool disconnects in the meantime - Bugfix: Always let watchpool thread handle dead pool recovery (including for stratum-only pools) - Bugfix: Avoid lingering stratum_auth when connection is lost - API-README explain custom page extensions in miner.php - miner.php add a sample group pool report - miner.php allow where,group,having on cumstom pages - Bugfix: Hook CURLOPT_DEBUGFUNCTION to count actual bytes sent/received by libcurl - Bugfix: Reset pool transparency_time when connecting to stratum - Bugfix: Immediately discard shares found on disconnected stratum pools, since there is no way to submit them - Bugfix: Decrement total_submitting when stale shares are discarded before any submission attempts - Bugfix: Only try to compare stratum job_id for work that has a job_id (ie, ones that came from stratum) - Bugfix: Recheck has_stratum even if the pool hasn't changed, in case pool has switched to another protocol in the process; also only delay 5 seconds before retry if pool is the same - Bugfix: Try GBT if no pool protocol is known (can occur in the process of stratum failover to GBT) - Bugfix: Correctly track discarded stratum shares, and log them as "disconnect" in sharelog - Check for EWOULDBLOCK when supported in send and recv as well. - Use the raw send() command instead of curl_easy_send since curl raw socket usage introduces random bugs on windows. - Use raw recv() command in place of curl_easy_recv since the curl implementation introduces random bugs on windows builds when the recv fails. - miner.php when displaying a single rig, add prev/next rig buttons if they exist, next to refresh - miner.php allow custom page joins for STATS - miner.php - include windows easyphp link - driver-ztex: use the correct size for the swap array - API stats - display pool byte transfer stats - Pool store data transfer stats - Benchmark incorrect work size - ChangeLog refer to NEWS - driver-ztex: search the complete noncerange based on the actual speed - API-README update - api use a dynamic io buffer, truncated before it reaches the current ~64k limit BFGMiner Version 2.10.0 - December 11, 2012 - Bugfix: Free work before replacing it with clone - Bugfix: Since we are using pipes for select notifier on *nix, we need to use read/write there - Bugfix: Winsock needs send/recv for sockets, not write/read - Bugfix: opencl: Initialize pc_data to avoid clean_work checking uninitialized pointers - Bugfix: Correct parenthesis in bind() call in Windows notifier_init - Include Windows error messages in notifier_init errors - Include prctl header for thread renaming to work. - Set tv_idle time if a pool is not active when input from the menu. - minor unlikely zero pointer test - BeaverCreek doesn't like BFI INT patching. 
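One entry above hooks CURLOPT_DEBUGFUNCTION to count the bytes libcurl actually puts on the wire. The debug callback sees every header and data chunk in both directions, but it only fires when CURLOPT_VERBOSE is enabled. A hedged sketch with illustrative names:

    #include <stddef.h>
    #include <curl/curl.h>

    struct xfer_counts {
        unsigned long long sent, recvd;
    };

    static int count_bytes_cb(CURL *handle, curl_infotype type, char *data,
                              size_t size, void *userptr)
    {
        struct xfer_counts *c = userptr;

        switch (type) {
        case CURLINFO_HEADER_OUT:
        case CURLINFO_DATA_OUT:
            c->sent += size;
            break;
        case CURLINFO_HEADER_IN:
        case CURLINFO_DATA_IN:
            c->recvd += size;
            break;
        default:
            break;
        }
        return 0;
    }

    /* during handle setup ('curl' and 'counts' are assumed to exist): */
    curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, count_bytes_cb);
    curl_easy_setopt(curl, CURLOPT_DEBUGDATA, &counts);
    curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);  /* required for the callback */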
- Only stratum pools that are idle need to be kicked via cnx_needed. - Do not do any setup if opt_api_listen is disabled in api.c. - libztex: in case the selectFpga() failed set the selected fpga to unknown - Only set the lagging flag for select_pool() on failed getwork if we're not in opt_fail_only mode. - driver-ztex: support for broken fpga on a multifpga board - libztex: use a function for the twice called firmware reset code - libztex: removed an unused struct member (ztex->valid) - Set the pool lagging flag on startup to avoid it being shown initially, and only unset it once the maximum number of staged work items has been reached. - libztex: Include compat.h for substitute libusb_error_name (on older libusb versions missing it) - Suppress warning about "succeeded" not being used in finish_req_in_progress for now - Bugfix: Always give the get_work thread a curl, regardless of other outstanding curls in use - Bugfix: Failover after even a single job-request failure (or else it takes too long on timeouts) - Bugfix: Need to remove and re-add curl easy handles from multi to start a new request - Access total_submitting under mutex lock to avoid any potential races, and increment it as soon as we queue the submission up - Just leave the submit_work thread running persistently - Bugfix: Restore work->pool after prepare_rpc_req since clean_work now clears it - Bugfix: Now that stage_work is trying to manipulate staged_work in the same thread, clone_available needs to stage it outside of its own lock - Make main() the getwork scheduler once everything is set up, so that all app exits use the kill_work and quit paths. - Set successful connect to true on auth stratum to allow summary on exit from single stratum pool. - Hash_pop should signal further waiters on its own pthread conditional in case there are multiple waiters. - Check the job_id has not changed on stratum work when deciding if the work is stale as might occur across disconnections. - Perform pool_resus on getwork pool that generates work in getwork_thread. - Set pool lagging message for getwork pool that falls to zero staged in getwork thread. - Stage extra work when the primary pool is a getwork pool without rolltime. - Do not try to clean up twice if kill message is given. - Only recalculate total_staged in getwork thread if required. - Include the correct config header in libztex and include it before other includes. - Implement a completely new getwork scheduler. Stage all work from the one thread, making it possible to serialise all requests minimising the number of getworks requested or local work generated. Use a pthread conditional to wake up the thread whenever work is removed to generate enough work to stay above the watermark set by opt_queue. Remove all remnants of the old queueing mechanism, deleting the now defunct queued count. - Bugfix: Clean up share hashing and target checks, fixing share difficulty calculation for above-target would-be-shares - Use templates from pool_active and longpolls without fetching more unnecessarily - Try to avoid requesting GBT jobs when there is already a request in progress that will likely provide sufficient work - Reuse most recent GBT job if in get_work_thread if it isn't stale - libztex: fixed some warnings and removed some whitespaces - Remove all references to the now unused workio_cmd structure. - Remove the old workio command queue thread, replacing it with a kill conditional to exit the program. 
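A minimal sketch of the condition-variable pattern behind the getwork-scheduler entries above: the scheduler sleeps until staged work is consumed, and a hash_pop-style waiter re-signals in case other threads are also waiting. This is illustrative only; a bare counter stands in for the real staged-work hash table.

    #include <pthread.h>

    static pthread_mutex_t stage_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  stage_cond = PTHREAD_COND_INITIALIZER;
    static int staged_count;

    static void example_hash_push(void)
    {
        pthread_mutex_lock(&stage_lock);
        ++staged_count;
        pthread_cond_signal(&stage_cond);      /* wake one waiter */
        pthread_mutex_unlock(&stage_lock);
    }

    static void example_hash_pop(void)
    {
        pthread_mutex_lock(&stage_lock);
        while (!staged_count)
            pthread_cond_wait(&stage_cond, &stage_lock);
        --staged_count;
        if (staged_count)
            pthread_cond_signal(&stage_cond);  /* pass the wakeup along */
        pthread_mutex_unlock(&stage_lock);
    }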
- Remove getwork command from workio_cmd queues and do them directly from queue_request. - Begin tearing down the old workio command queues by removing submit commands from there and submit them asynchronously via their own threads. - driver-ztex: changed two pairs of malloc()/memset() to calloc() - libztex: Read bitstream file in 2kb blocks with simpler and faster code - Added the binary versions of ztex_ufm1_15d4.ihx and ztex_ufm1_15y1.ihx - libztex: Add firmware download support for ZTEX 1.15d and 1.15x - libztex: Factor out local version of libusb_get_string_descriptor_ascii() - libztex: Don't return error when a bitstream was already configured - libztex: Read bitstream file in 64kb blocks with simpler and faster code - libztex: Verify that the mining firmware is not a dummy firmware - libztex: Match mining firmware ZTEX descriptor against the dummy firmware - libztex: Start download sequence only after reading in the new firmware - libztex: Download mining firmware to all devices with dummy firmware - Update windows build instructions. - Set pool probed to true on successful authorisation with stratum to avoid it being pinged later. - Style changes. - Allow pool active to be called on stratum or disabled pools in the watchpool thread if the pool has not been probed. - lock (most of) the threaded statistics updates - README stats don't add up - Rearrange summary lines and include count of active submissions in progress - Defer submissions instead of blocking in pop_curl_entry - Run a single share submission thread asynchronously submitting all shares in parallel - Handle share submissions asynchronously, one at a time (still threaded) - Split up json_rpc_call so it can be used asynchronously in libcurl-multi - Split submit_upstream_work into _request and _completed stages, pulling out json_rpc_call - Bugfix: Adjust USB_* variables to new LIBUSB_* names - Bugfix: Avoid double-free due to realloc_strcat moving memory around - Bugfix: Stratum connections might be needed for share submissions up to a minute after the last time they are used to generate work - Bugfix: Clean work before trying to generate new stratum work on top of it - Bugfix: modminer: Get rid of useless usbutils include - Make need connection return true if a pool is idle. - New --skip-security-checks option to allow miners to skip checks when it saves bandwidth - Skip stratum transaction download when there are no transactions - API add Best Share to summary - API lock access to some summary statistics (and copy them) - Enable backup stratum connections for getwork when the primary pool doesn't have longpoll aka solo mining. - Check for correct absence of opt_fail_only in cnx_needed. - Remove unused variable. - The specification for stratum has been elaborated to say that a changed diff applies only to new work so do not retarget when submitting shares. - Suspend stratum connections to backup pools when there is no requirement to potentially grab work from them. - Rename rename_thr to RenameThread to match cgminer - modminer: Adopt symbolic command names from kanoi - Make gen_stratum_work more robust by using a dynamically allocated array for the header in case bogus data is sent by the pool to avoid overflowing a static array. - scrypt_diff now returns a uint64_t - Support monitoring and reporting much higher diffs for scrypt mining, truncating irrelevant zeroes from displayed hash. - Pass ostate values around in scrypt to be able to extract full hashes if needed later on. 
- Revert "Handle crash exceptions by trying to restart cgminer unless the --no-restart option is used." - Provide helper function realloc_strcat to extend arbitrary length arrays based on string length. - Use base_work for comparison just for cleanness in __copy_work - Remove all static work structs, using the make and free functions. - Add pool no. to stale share detected message. - Add info about which pool share became stale while resubmitting. - Reduce extra slots in the max backlog for ztex to minimise memory waste. - Get rid of unused last_work in opencl thread data. - Do away with the flaky free_work api in the driver code which would often lose the work data in opencl and simply flush it before exiting the opencl scanhash. - Minor work handling restructure, including moving some stratum data from fixed-size buffers to their own heap allocations. - opencl: Use new dev_error function for REASON_DEV_NOSTART - Provide rudimentary support for the balancing failover strategies with stratum and GBT by switching pools silently on getwork requests. - Convert remaining modminer and bfl uses of usleep to nmsleep. - Convert libztex to nmsleep where possible. - Convert unreliable usleep calls to nmsleep calls in ztex driver. - Tidy up device error counts - Only increase gpu engine speed by a larger step if the temperature is below hysteresis instead of increasing it to max speed. - Convert pool not responding and pool alive message on backup pools to verbose level only since they mean a single failed getwork. - Use stratum block change from backup pools as an alternative to longpoll for pools that don't support LP. - Round some more static string arrays to 4 byte boundaries. - There is no need for the static arrays to be larger than required, so long as they're 4 byte aligned to appease ARM. - Hash1 is only used by the CPU mining code and never changes so remove it from the work struct and bypass needing to process the value for all other mining. BFGMiner Version 2.9.5 - December 11, 2012 - Bugfix: Copy share hash to work->hash before doing 4-byte flip required by fulltest - driver-ztex: libztex_setFreq() must be called before ztex_releaseFpga() - libztex: Make log messages say bitstream when refering to bitstreams - Increase FD_SETSIZE to 4096 on Windows - Bugfix: Use AC_PROG_CPP in libusb include subdirectory detection for improved portability - Bugfix: Free input memory after prioritising pools in TUI - Bugfix: Free filename entry for writing config file when done with it - Bugfix: Free stratum nonce1 before replacing it with new value on reconnect BFGMiner Version 2.9.4 - December 4, 2012 - Update libblkmaker to 0.2.1 - Count template number, and append it to the coinbase of templates without any cbtxn - Bugfix: bitforce: Always increment global hw error counter when incrementing device hwe - Bugfix: Correct order of printf-style arguments in cbappend fail - Bugfix: Capitalize "MHz" correctly - ztex: Correctly release mutex and reset FPGA if configuration fails - ztex: Harmonize low-speed FPGA configuration code with high-speed code - libztex: Silence warning: comparison between signed and unsigned - Count longpoll decodes as queued work since the count otherwise remains static. 
- Bugfix: Assign header-based rolltime before decoding work, so GBT expires overrides it properly - Look for libusb_init in -lusb, since FreeBSD has it there - Bugfix: Use pkgconfig for libusb when available, and try to guess the include path if not - Bugfix: FPGA-README: Correct idVendor in example MMQ udev rule - fixes target calc for mips openwrt - Bugfix: clear_work: Whether the template is in fact being freed or not, the work reference to it needs to be - libztex: Work around ZTEX USB firmware bug exposed by the FreeBSD libusb - README: Document solo mining usage - README: Update dependencies - Bugfix: We should never roll stale work - Ubuntu: Removing erroneous libssl dep again. GITHUB#94 - Bugfix: Clear out stratum share work before freeing it - Provide rudimentary support for literal ipv6 addresses when parsing stratum URLs. - Do not attempt to remove the stratum share hash after unsuccessful submission since it may already be removed by clear_stratum_shares. BFGMiner Version 2.9.3 - November 16, 2012 - Bugfix: Properly process new stratum jobs through test_work_current, even if old shares are still accepted, and copy submit_old flag correctly - Ensure pdiff 1 is always caught regardless of bdiff precision, and ceil all other cases to ensure we never lose valid shares - Check against a double for current pool diff. - Support for fractional diffs and the classic just-below-1 share all FFs diff target. - Check share target diff for best_share to be calculated when solo mining. - Store the full stratum url information in rpc_url for correct configuration file saving. - Put in a hack to prevent dud work from sneaking into test_work_current being seen as a new block. - Reset the work->longpoll flag where it will affect stratum work items as well. - Bugfix: Stratum does not guarantee notify messages every minute, so extend timeout to 2 full minutes - Bugfix: Always honour libblkmaker time limits - Always (debug)log when stratum template is updated by the pool - Bugfix: When a stratum connection is interrupted, ensure all work/shares for it are considered stale - Bugfix: clear_sock should return on socket errors - Bugfix: Force calculation of work_difficulty since set_work_target fails to consider the pdiff1000. - Get rid of unused warning for !scrypt. - Use select on stratum send to make sure the socket is writeable. - Cope with dval being zero in suffix_string and display a single decimal place when significant digits is not specified but the value is greater than 1000. - Pad out the suffix string function with zeroes on the right. - Failure to calloc in bin2hex is a fatal failure always so just check for that failure within the function and abort, simplifying the rest of the code. - Provide locking around the change of the stratum curl structures to avoid possible races. - Bump opencl kernel version numbers. - Remove atomic ops from opencl kernels given rarity of more than once nonce on the same wavefront and the potential increased ramspeed requirements to use the atomics. - Clear the pool idle flag in stratum when it comes back to life. - Display correct share hash and share difficulty with scrypt mining. - Show work target diff for scrypt mining. - Watch for buffer overflows on receiving data into the socket buffer. - Dramatically simplify the dynamic intensity calculation by oversampling many runs through the opencl kernel till we're likely well within the timer resolution on windows. - Align static arrays to 4 byte boundaries to appease ARM builds for stratum. 
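The suffix_string entries above are about rendering large difficulty and hashrate values with k/M/G/T suffixes. A rough sketch of the idea; the real function additionally handles significant digits, right-padding with zeroes, and zero values as the entries describe:

    #include <stddef.h>
    #include <stdio.h>

    static void example_suffix_string(double val, char *buf, size_t bufsiz)
    {
        static const char *suffixes[] = { "", "k", "M", "G", "T", "P" };
        int i = 0;

        while (val >= 1000.0 && i < 5) {
            val /= 1000.0;
            ++i;
        }
        snprintf(buf, bufsiz, "%.1f%s", val, suffixes[i]);
    }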
- Update documentation. - Left align values that are suffix_string generated. - Share_diff should not be converting the work data to hex. - Update readme describing difficulty displayed on log lines. - Off by one error. - Prevent overflows of the port char array in extract_sockaddr. - Disable stratum detection with scrypt. - Display the actual share diff next to the pool required diff, using a suffix creation function to prevent values of >1000 being shown in their entirety. - Fix 4 * 0 being 0 that would break dynamic intensity mode. - Supplement other 64-bit endian swap macros - Bugfix: Fix htobe64 on big endian platforms that don't define it - Fix lack of htobe64 on mingw32. - Reinstate the history on dynamic intensity mode to damp fluctuations in intensity but use an upper limit on how much the value can increase at any time to cope with rare overflows. - Update to cgminer's newer dynamic intensity algorithm - Support for the stratum mining protocol. - Simplify target generation code. - Add support for client.get_version for stratum. - Use a 64 bit unsigned integer on the diff target to generate the hex target. - Update reconnect message to show whole address including port. - Look for null values and parse correct separate array entries for url and port with client reconnect commands for stratum. - The command for stratum is client.reconnect, not mining.reconnect. - Only copy the stratum url to the rpc url if an rpc url does not exist. - Implement rudimentary mining.reconnect support for stratum. - Ignore the value of stratum_active on calling initiate_stratum and assume we're always trying to reinitiate it, and set the active flag to false in that function. - stratum auth can be unset if we fail to authorise on subsequent calls to auth_stratum which undoes the requirement of setting it in one place so set it in pool_active. - Format Stratum submission-start debug the same way as other submissions - Bugfix: Set work_restart_id in gen_stratum_work for when work is reused to avoid thinking it's all stale. - Only auto-switch to Stratum internally, but save HTTP URI in case pool stops using Stratum; also always shows original pool URI on RPC - SHUT_RDWR is now always defined for us, so no need to check ifdef on LP hang - Implement --no-stratum option to disable autodetection - Show Stratum pools as "Strtm" protocol in "Pool management" TUI - Bugfix: BFGMiner doesn't use rpc_proxytype - Remove free that could segfault. - Use the stratum url as the rpc url advertised if we switch to it. - Count an invalid nonce count as a hardware error on opencl. - Count each stratum work item as local work. - Cope with one stratum pool being the only active pool when it dies by sleeping for 5 seconds before retrying to get work from it instead of getting work indefinitely. - Detect stratum outage based on either select timing out or receiving an empty buffer and properly re-establish connection by disabling the stratum_active flag, coping with empty buffers in parse_stratum. - Fix various modminer warnings on mingw. - Fix sign warning on windows build for bitforce. - Cast socketfail to integer since SOCKET is an unsigned int on windows. - Use the stratum thread to detect when a stratum pool has died based on no message for 2 minutes. - Only set the stratum auth flag once and once the stratum thread is started, use that to set/unset the stratum active flag. - Only hand off to stratum from getwork if we succeed in initiating the protocol. - Target should only be 32 bytes copied. 
- Use a static array for work submission data instead of stack memory. - Clear the buffer data before sprinting to it. - Clear work stratum strings before setting them and add them to debug output. - Drop stratum connect failed message to verbose level only since it's a regular probing message. - TCP Keepalive in curl is only in very recent versions and not required with regular messages on stratum anyway. - Move stratum sockets to curl infrastructure with locking around send+recv to begin support for proxies and ssl. - Make detect stratum fail if a proxy has been set up. - Stratum does not currently have any proxy support so do not try to switch to stratum if a proxy has been specified. - Windows doesn't work with MSG_PEEK on recv so move to a continuously updating buffer for incoming messages. - Alloca is unreliable on windows so use static arrays in util.c stratum code. - Begin support for mingw stratum build. - Add space to reject reason. - Parse the reject reason where possible from stratum share submission. - Pass json error value to share result function to be able to parse reject reason in stratum. - Don't try to parse unneeded parameters in response to mining.subscribe. - Remove the sshare hash entry if we failed to send it. - Change notify message to info level to avoid spamming repeatedly when a pool is down. - Check the stratum pool difference has not changed compared to the work diff when testing whether a share meets the target or not and retarget if necessary. - Bit error in target calculation for stratum. - Offset the current block detection to the prev block hash. - We should be testing for id_val, not id in parse stratum response. - Make target on stratum scale to any size by clearing sequential bits according to diff. - Correct target calculation in gen_stratum_work. - If a share result has an error code but still has an id, it is likely a reject, not an error. - Initiate stratum the first time in pool_active only, allowing us to switch to it on getting a failed getwork and detecting the presence of stratum on the url at that time. - Use 5 second timeout on sock full for now as a temporary workaround. - If no stratum url is set by the end of the detect stratum routine, copy the sockaddr url. - Make all buffers slightly larger to prevent overflow. - Make the stratum recv buffer larger than the recvsize. - Userpass needs to be copied to user and pass earlier to allow stratum authorisation to work with it. - Store a sockaddr url of the stripped url used in determining sockaddr to not confuse it with the stratum url and fix build warnings. - Decrease the queued count with stratum work once it's staged as well. - Allow the stratum retry to initiate and auth stratum in pool_alive to make sure the stratum thread is started. - Avoid duplicating pool->rpc_url and setting pool->stratum_url twice to itself. - Detect if a getwork based pool has the X-Stratum header on startup, and if so, switch to the stratum based pool. - Comment update. - Minor message change. - Create a work item from a "clean" request from stratum allowing the new block to be detected and the appropriate block change message to be given. - Use statically allocated stratum strings in struct work to cope with the inability to safely deallocate dynamically allocated ram. - Use the current pool when deciding whether to reuse work from a stratum source rather than the work's previous pool. - Copy the stratum url to the rpc url to avoid none being set. - Provide locking around stratum send operations to avoid races. 
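The entries above about replacing MSG_PEEK with a continuously updated receive buffer amount to the following pattern: append whatever recv() returns to a rolling per-pool buffer and only peel off complete newline-terminated lines. This POSIX sketch blocks until a full line arrives; the real code works with select() and timeouts, and the names are illustrative.

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    #define EX_SOCKBUFSIZ 8192

    struct example_sockbuf {
        char buf[EX_SOCKBUFSIZ];
        size_t len;
    };

    /* Copy one '\n'-terminated line (without the newline) into 'line';
     * returns its length, or -1 on error, EOF, or an overlong line. */
    static ssize_t example_recv_line(int sock, struct example_sockbuf *sb,
                                     char *line, size_t linesiz)
    {
        char *nl;
        ssize_t n;
        size_t linelen, copied;

        while (!(nl = memchr(sb->buf, '\n', sb->len))) {
            if (sb->len >= sizeof(sb->buf) - 1)
                return -1;
            n = recv(sock, sb->buf + sb->len, sizeof(sb->buf) - 1 - sb->len, 0);
            if (n <= 0)
                return -1;
            sb->len += n;
        }

        linelen = nl - sb->buf;
        copied = linelen < linesiz ? linelen : linesiz - 1;
        memcpy(line, sb->buf, copied);
        line[copied] = '\0';

        /* keep whatever followed the newline for the next call */
        sb->len -= linelen + 1;
        memmove(sb->buf, nl + 1, sb->len);
        return copied;
    }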
- Submit shares from stratum through the abstracted submit share function detecting what message they belong to and showing the data from the associated work, and then deleting it from the hash. - Use a more robust mechanism to obtain a \n terminated string over a socket. - Abstract out share submit as a function to be useable by stratum. - Rename parse_stratum to parse_method as it is only for stratum messages that contain methods. - Display stratum as mechanism in status line when current pool is running it. - Count each stratum notify as a getwork equivalent. - Correct nonce submitted with share. - Extranonce2 should be added before coinbase2. - We should be hashing the binary coinbase, not the hex one. - Fix endianness of nonce submitted for stratum. - Check that stratum is already active in initiate_stratum to avoid de-authorising ourselves by subscribing again. - Begin implementing a hash database of submissions and attempt sending results. - Copy parameters from stratum work required for share submission. - Set lagging flag on first adding a pool to prevent pool slow warning at startup. - Fix work->target being a 32 byte binary in gen_stratum_work. - Store and display stripped url in its own variable. - Create machinery to divert work requests to stratum. - Generate the work target in gen_stratum_work, setting default diff to 1 in case it is not yet set. - Generate work data, midstate and hash1 in gen_stratum_work. - Generate header created from stratum structures in gen_stratum_work. - Generate merkle root hash in gen_stratum_work. - Generate the coinbase for generation of stratum based work. - The number of transactions is variable so make merkle a variable length dynamically allocated array and track how many there are for stratum. - Rename nonce2 to n2size reflecting that it's a size variable and not the actual nonce. - Provide rudimentary support for stratum clean work command in the stratum thread. - Cope with pools being removed in the stratum thread. - Use the pool sock value directly in the stratum thread in case it changes after reconnecting. - Create a stratum thread per pool that has stratum that monitors the socket and serves received data. - Check return value of stratum_parse. - Complete authorisation in stratum. - Implement stratum parsing of notify parameters and storing them in the pool stratum work structure. - Create helper functions for duplicating json strings to avoid keeping json references in use. - Append \n in the sock_send function instead of adding it when constructing json in stratum. - Don't keep any json references around with stratum structures. - Create parse_stratum function that hands off stratum parameters to other functions to manage pool stratum work struct variables. Implement mining difficulty setting. - Create helper functions for checking when a socket is ready to read on and receive a single line at a time. Begin stratum authorisation process. - Provide a helper function for reading a single \n terminated string from a socket. - Create a stratum work structure to store current work variables. - Test specifically for stratum being active in pool_active. - Detect stratum in common place when adding urls, and use a bool to tell us when it's active. - Remove unused add_pool_details5 - Fix warnings. - Extract and store various parameters on stratum init confirming successful mining notify. - Use existing socket macros and close the socket on failure in init stratum. - Initiate stratum and grab first json result. 
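Several entries above describe how stratum work is built: the binary coinbase is coinb1, then extranonce1, then extranonce2, then coinb2, and the merkle root is produced by double-SHA256 hashing that coinbase and folding in each merkle branch from mining.notify. A compact sketch of that construction, assuming a dsha256() double-SHA256 helper; the function below is illustrative, not the actual gen_stratum_work:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    extern void dsha256(void *out32, const void *data, size_t len);  /* assumed helper */

    static void example_stratum_merkle_root(uint8_t *merkle_root,  /* 32 bytes out */
        const uint8_t *coinb1, size_t coinb1_len,
        const uint8_t *xnonce1, size_t xnonce1_len,
        const uint8_t *xnonce2, size_t xnonce2_len,
        const uint8_t *coinb2, size_t coinb2_len,
        const uint8_t (*branches)[32], int nbranches)
    {
        uint8_t coinbase[coinb1_len + xnonce1_len + xnonce2_len + coinb2_len];
        uint8_t buf[64];
        size_t off = 0;

        /* coinbase = coinb1 || extranonce1 || extranonce2 || coinb2 (binary) */
        memcpy(&coinbase[off], coinb1, coinb1_len);   off += coinb1_len;
        memcpy(&coinbase[off], xnonce1, xnonce1_len); off += xnonce1_len;
        memcpy(&coinbase[off], xnonce2, xnonce2_len); off += xnonce2_len;
        memcpy(&coinbase[off], coinb2, coinb2_len);   off += coinb2_len;

        dsha256(merkle_root, coinbase, off);          /* hash the binary coinbase */

        /* fold in each merkle branch supplied by mining.notify */
        for (int i = 0; i < nbranches; ++i) {
            memcpy(buf, merkle_root, 32);
            memcpy(buf + 32, branches[i], 32);
            dsha256(merkle_root, buf, 64);
        }
    }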
- Get detailed addressinfo from the parsed URL for future raw socket usage when possible. IPV4 only for now. - Prepare for getaddrinfo call. - Add data structures to pool struct for socket communications. - Put all socket definitions in util.h to allow reusing by added socket functions to be used in util.c. BFGMiner Version 2.8.3 - October 18, 2012 - Update to libblkmaker 0.1.3 - Use explicit host to BE functions in scrypt code instead of hard coding byteswap everywhere. - Ease the checking on allocation of padbuffer8 in the hope it works partially anyway on an apparently failed call. - Round target difficulties down to be in keeping with the rounding of detected share difficulties. - String alignment to 4 byte boundaries and optimisations for bin<->hex conversions. - Fix GPU memory allocation size for scrypt - Fix access violation with scrypt mining - Bugfix: Only free rpc_req after using it, not before - Bugfix: Increment work->pool->staged inside of mutex to avoid work being freed (and staged decremented) before we dereference it - Revert "No need for extra variable in hash_push.": The extra variable is needed to avoid a rare dereference-after-free error. - In opencl_free_work, make sure to still flush results in dynamic mode. - Workaround: Debug log only after dec_queued, to make a free/use race more rare - Bugfix: Remove redundant \n in debug messages - Bugfix: Free rpc_req in pool_active and longpolls - README: Explicitly provide Ubuntu package name for libjansson-dev - Bugfix: Include flash_led bool in cgpu_info for Icarus-but-not-BitForce builds, since Cairnsmore uses it - Only check work block id against pool's if the pool has a known block id - Avoid clearing pool->block_id unless we really are changing pools BFGMiner Version 2.8.2 - October 8, 2012 - Update to libblkmaker 0.1.2 - Bugfix: --temp-target no longer has a simple default (fixes build without OpenCL support) - Bugfix: icarus: Silence false epoll error - Bugfix: icarus: Set firstrun for errors starting next job, so the current one finishes properly - Bugfix: icarus: Restore generic failure management for write errors - Use strtod not strtol for bitforce temp backup. - Cope with broken drivers returning nonsense values for bitforce temperatures. - Minor warning fixes. - Fix unused warnings on ming build. - Fix sign warning in ocl.c - fds need to be zeroed before set in modminer. - Put scrypt warning on separate line to avoid 0 being shown on windows as bufsize. - Prevent corrupt values returned from the opencl code from trying to read beyond the end of the buffer by masking the value to a max of 15. 
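The entry above about preparing a getaddrinfo call for raw socket use (IPv4 only at that point) boils down to the standard resolve-and-connect loop. A POSIX sketch with an illustrative name:

    #include <netdb.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static int example_connect_ipv4(const char *host, const char *port)
    {
        struct addrinfo hints, *res, *p;
        int sock = -1;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_INET;        /* IPv4 only for now */
        hints.ai_socktype = SOCK_STREAM;

        if (getaddrinfo(host, port, &hints, &res))
            return -1;
        for (p = res; p; p = p->ai_next) {
            sock = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
            if (sock < 0)
                continue;
            if (!connect(sock, p->ai_addr, p->ai_addrlen))
                break;                    /* connected */
            close(sock);
            sock = -1;
        }
        freeaddrinfo(res);
        return sock;
    }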
- Icarus USB write failure is also a comms error - api.c DEBUG message has no paramter - Icarus catch more USB errors and close/reopen the port - API-README update cgminer verison number - hashmeter fix stats kh/s on 32bit windows - cairnsmore: Increase maximum clock frequency to 210 Mhz - icarus: Hashrate estimates really don't need the attention of a warning, demote them to debug - cairnsmore: Automatically "downgrade" default FPGA-per-device to 1 for dynclock devices - Bugfix: cairnsmore: Get autodetection of dynclock to work consistently - cairnsmore: Adjust dynclock usage to react in proper time - dynclock: Document function usage - cairnsmore: Fix race on dynclock detection - icarus: Detect attempts to send commands via work and neuter them - cairnsmore: Glasswalker has a minimum multiplier of 20 :( - cairnsmore: Detect frequency changing support despite hashing of commands - modminer: Allow clocks down to 2 Mhz just in case - Allow device drivers and users to properly change target temperatures for non-GPUs - Check that ncurses*-config installs actually work before deciding to use them - Bugfix: Fix multiple bugs in autogen.sh - - Don't use readlink -f unneccesarily (it's not portable) - - Always run autoreconf within the real source directory - - Run configure from PWD, *not* the real source directory - Bugfix: Include nonce in data buffer for debugging - Bugfix: swap32* wants count of 32-bit blocks, not bytes - Initial Cygwin port - Revert "Remove needless roundl define.", since it is needed for Cygwin and OpenWRT - Bugfix: Deal with various compiler warnings - modminer: Implement --temp-hysteresis logic - Support for maximum frequency being below the default, eg when the maximum is temporarily reduced to deal with temperature - Bugfix: modminer: Reduce dynclock max frequency as needed to keep temperature below cutoff - Bugfix: Restore disabled label, needed to skip over hashrate calculations (which mess up otherwise) - Bugfix: bitforce: Count actual throttling as hardware errors - icarus: Allow failure in case of reopen failure, now that the miner core will retry on its own - If a device dies, attempt to reinitialize it occasionally - Bugfix: The REST flag is now preferred over WAIT, since the former might trigger the latter - Bugfix: modminer: Update temperature readings when disabled (fixes thermal cutoff recovery) - Bugfix: Move thermal cutoff to general watchdog code (fixes bitforce recovery) - Rename enable_device to register_device, since it only works for setting it up at startup - Move targettemp from ADL to cgpu_info, so all devices can readily use it - Bugfix: "REST" flag had too much padding - Bugfix: adl: Only warn and disable GPU due to thermal cutoff, if it's actually enabled - Bugfix: bitforce: Only warn and disable bitforce due to thermal cutoff, if it's actually enabled BFGMiner Version 2.8.1 - September 27, 2012 - Avoid strndup for Windows compatibility - Bugfix: cairnsmore: Add missing compat.h include (for sleep) - cairnsmore: Implement "identify" for supported firmware - Adjust identify_device API to return a bool whether supported or not, for runtime capability detection - Bugfix: cairnsmore: Fix invalid share detection on LE - Bugfix: icarus: Fix logging message to not assume "Icarus" always, and use device driver name - Bugfix: cairnsmore: Correct frequency scaling detection logic - cairnsmore: When changing frequency, adjust Hs expectations accordingly - cairnsmore: Detect availability of frequency scaling, and only enable it when supported - cairnsmore: 
Implement dynamic clocking support for Glasswalker's bitstream - Update libblkmaker to 0.1.1 - Advertise BFGMiner in blocks found by default (without --coinbase-sig) - RPC: Add "Coinbase-Sig" to config/setconfig - New --coinbase-sig option to add arbitrary data to blocks you generate (GBT only) - opencl: Defer nonce validity checking to submit_nonce - scrypt: Implement test_nonce2 and submit_nonce hw error check - Bugfix: modminer: Convert nonce to native endian - Interpret any attempts to submit a H-not-zero nonce as a hardware error - make-release: Strip DLLs and EXE in Windows binary - dynclock: Use consistent messages for frequency changes - modminer: Port to dynclock - dynclock: Split dynamic clocking algorithm out of Ztex driver - Bugfix: When changing GPU memclock, adjust internal variable so it is correctly saved to config file - Bugfix: Re-probe longpoll header for each pool alive check, including retries when a preferred protocol fails - Bugfix: modminer: Bitstream binary filenames are *.bit - modminer: Start frequency off at 200 Mhz - Reorder libztex header include order to fix missing struct definition. - Display share difficulty on log with a shortened hash display on submission. - API stats add some pool getwork difficulty stats - Ignore any pings pushed to the worker threads if the thread is still paused to prevent it being enabled and disabled repeatedly. - Test for sequential getwork failures on a pool that might actually be up but failing to deliver work as we may end up hammering it repeatedly by mistake. - reduce windows compile warnings - util.c - bug - proxy - no data end condition - API don't change 'Diff1 Shares' - backward compatability FTW - miner.php highlighting correctly handling difficulty - API - Add last share difficulty for devices and pool - Store and report Accepted,Rejected,Stale difficulty in the summary and API - WorkTime - display prevblock for scrypt - api.c remove compile warnings - Calculate work difficulty for each getwork and display with WorkTime debug - FPGA - allow long or short device names in detect code + style police - WorkTime - multiple nonce per work and identify the work source - Optional WorkTime details with each Accepted/Rejected work item - Icarus - ignore hardware errors in timing mode - miner.php oops - mistype - API pgaidentify - unsupported message should be a warning - API/BFL identify a device - currently only BFL to flash the led - BFL add throttle count to internal stats + API - BFL: missing device id in log message - Bugfix: ztex: Clear device_ztex before freeing it - Bugfix: ztex: statline existence depends on whether the libztex structure exists, not whether the cgpu is enabled - Bugfix: README: Make usermod commands consistent, including important -a option - Bugfix: Address a couple of rare TQ leaks, and improve logging a bit - Bugfix: Properly quote configure options BFGMiner Version 2.8.0 - September 15, 2012 - Be specific about jansson version requirement - Replace "Alive" in pool status with protocol in use (GBT or GWork) - Remove copy of old jansson from source repository - Honour block template expiry (BIP 23 Basic Pool Extensions "expires") - Add --no-gbt option so getblocktemplate can be disabled if it causes problems - BIP 22 long polling - Properly detect pool protocol - Bugfix: Sort out work template refcounting by properly using work_free and new workcpy - Support for rolling extranonce in templates - Initial libblkmaker integration, using a git submodule - cairnsmore: There's no set hashrate like Icarus, 
so always use short timing mode by default - Bugfix: Include unistd.h needed for ssize_t type - fpgautils: Don't try to scan serial at all anymore, if a device is claimed - fpgautils: serial_claim function to politely ask other drivers not to try to use device - RPC: Update to work with Cairnsmore - cairnsmore: Windows autodetect using FTDI library - cairnsmore: Beginnings of new driver, with automatic upgrade from Icarus detection - icarus: Support disabling reopen quirk via --icarus-options - proxy: Replace mess of encoding proxy into pool URI with a --pool-proxy option, and use cURL's builtin proxy URI support - save individual pool proxy settings to config - API-README update for pools proxy info - CURL support for individual proxy per pool and all proxy types - Bugfix: Update current_block_id for fixed set_curblock - miner.php by default don't display IP/Port numbers in error messages - api.c all STATUS messages automatically escaped - API add display of and setting queue,scantime,expiry - README - FPGA device FAQ - API add device diff1 work - count device diff1 shares - API-README update - api.c Correct diff1 field name - Bugfix: Sanitize block hash handling (including fixing on big endian) - Bugfix: Print the (full) correct block hash when warning about work issued against old blocks - Bugfix: When comparing current block, only pay attention to the prevblock header - Allow mixing user+pass and userpass, so long as user+pass are balanced before userpass options - ztex: Include device serial number and FPGA number in cgpu name field - ztex: Abstract common cgpu_info creation code - ztex: Do thread initialization in thread_init rather than thread_prepare - Bugfix: Tolerate working on old blocks when there is only one pool enabled - Bugfix: ztex: Detect through fpgautils so -S noauto correctly inhibits autodetection - ztex: Workaround duplicate share submissions by doubling "backlog" size - ztex: Use consistent device ids for logging - Bugfix: ztex: Increment global hw_errors too - Bugfix: free adhoc string elist element when removing it from list - Bugfix: icarus: Initialize lret variable after work restart reentry - Bugfix: ztex: Free lastnonce heap memory if backlog allocation fails - icarus: Initialize epoll event structure in a way Valgrind is happier with - Bugfix: Use strtok_r for parse_config since some options use strtok themselves - Import strtok_r from gnulib for Windows portability - Bugfix: ztex: Don't try to destroy a mutex that was never created (single FPGA Ztex devices) - ztex: Clean up redundant dereferencing in ztex_shutdown - API-README more debug parameter information - API allow full debug settings control - Sort the blocks database in reverse order, allowing us to remove the first block without iterating over them. Output the block number to debug. - Adjust opencl intensity when adjusting thread count to prevent it getting pegged at a value below the minimum threads possible. - miner.h max_hashes -> int64_t - Keep the local block number in the blocks structs stored and sort them by number to guarantee we delete the oldest when ageing the block struct entries. - Use correct sdk version detection for SDK 2.7 - Bugfix: Align Ztex statline properly by removing redundant frequency - make-release: Convert text files to DOS format for Windows ZIP BFGMiner Version 2.7.5 - August 27, 2012 - Revert "Do a complete cgminer restart if the ATI Display Library fails, as it does on windows after running for some time, when fanspeed reporting fails." 
- Stop special-casing worksize default to 256 for Cypress, since it incurs a 5 MH/s hit with stock config - New "--scan-serial all" feature to probe all enumerated serial ports - modminer: Revamp dynamic clocking algorithm per request from cablepair - Test for lagging once more in queue_request to enable work to leak to backup pools. - There is no need to try to switch pools in select_pool since the current pool is actually not affected by the choice of pool to get work from. - Only clear the pool lagging flag if we're staging work faster than we're using it. - needed flag is currently always false in queue_request. Remove it for now. - thr is always NULL going into queue_request now. - Fix for non-ADL OpenCL device formatting issue BFGMiner Version 2.7.4 - August 23, 2012 - Perform select_pool even when not lagging to allow it to switch back if needed to the primary. - Simplify macros in output kernels avoiding apparent loops and local variables. - Carry the needed bool over the work command queue. - Move the decision to queue further work upstream before threads are spawned based on fine grained per-pool stats and increment the queued count immediately. - Track queued and staged per pool once again for future use. - OpenCL 1.0 does not have native atomic_add and extremely slow support with atom_add so detect opencl1.0 and use a non-atomic workaround. - Pools: add RollTime info to API 'stats' and 'Stats' button in miner.php BFGMiner Version 2.7.3 - August 23, 2012 - Minimise the number of getwork threads we generate. - Pick worksize 256 with Cypress if none is specified. - Give warning with sdk2.7 and phatk as well. - Whitelist sdk2.7 for diablo kernel as well. - Only keep the last 6 blocks in the uthash database to keep memory usage constant. Storing more is unhelpful anyway. - Increase kernel versions signifying changed APIs. - BFL flash - more FPGA-README - Check we haven't staged work while waiting for a curl entry before proceeding. - Use atomic ops to never miss a nonce on opencl kernels, including nonce==0, also allowing us to make the output buffer smaller. - Remove compile errors/warnings and document compile/usage in FPGA-README - Ignore the submit_fail flag when deciding whether to recruit more curls or not since we have upper bounds on how many curls can be recruited, this test is redundant and can lead to problems. - API-README update cgminer version number - API-README fix groups P: example mistake - API-README add COIN and other edits - miner.php allow 'coin' is custom pages BFGMiner Version 2.7.1 - August 22, 2012 - Update windows build instructions courtesy of sharky. - Increase max curls to number of mining threads + queue * 2, accounting for up and downstream comms. - Queue enough requests to get started. - There is no point trying to clone_work in get_work() any more since we clone on every get_work_thread where possible. - There is no point subtracting 1 from maxq in get_work_thread. - miner.php allow page title to be defined in myminer.php - Only set lagging flag once there are no staged work items. - select_pool does not switch back to the primary once lagging is disabled. - Increment total work counter under mutex lock. - Increment the queued count after the curl is popped in case there's a delay waiting on curls and we think we've queued work when in fact we're waiting on curls. - Do the dynamic timing in opencl code over a single pass through scanhash to make sure we're only getting opencl times contributing to the measured intervals. 
- Increase curl reaping time to 5 minutes since comms between curl requests can be 2 mins apart with lots of rolltime. - No need for extra variable in hash_push. - Remove short options -r and -R to allow them to be reused and remove readme entries for deprecated options. - Deprecate the opt_fail_pause parameter, leaving a null placeholder for existing configurations. - Free work before retrying in get_work_thread. - Don't pause after failed getwork, set lagging flag and reassess. - We should not be pausing in trying to resubmit shares. - Get rid of the extending fail pause on failed connects since we discard work after a period. - get_work always returns true so turn it into a void function. - get_work never returns false so get rid of fail pause loop. - Get rid of pause and retry from get_upstream_work so we only do it from one place. - Remove all cases where --retries aborts BFGMiner, making it for submission retries only, where it makes sense. BFGMiner Version 2.7.0 - August 21, 2012 - Implement a new pool strategy, BALANCE, which monitors work performed per pool as a rolling average every 10 minutes to try and distribute work evenly over all the pools. Do this by monitoring diff1 solutions to allow different difficulty target pools to be treated equally, along with solo mining. Update the documentation to describe this strategy and more accurately describe the load-balance one. - fpga serial I/O extra debug (disabled by default) - Getwork fail was not being detected. Remove a vast amount of unused variables and functions used in the old queue request mechanism and redefine the getfail testing. - Consider us lagging only once our queue is almost full and no staged work. - Simplify the enough work algorithm dramatically. - Only queue from backup pools once we have nothing staged. - Don't keep queueing work indefinitely if we're in opt failover mode. - Make sure we don't opt out of queueing more work if all the queued work is from one pool. - Set lagging flag if we're on the last of our staged items. - Reinstate clone on grabbing work. - Grab clones from hashlist wherever possible first. - Cull all the early queue requests since we request every time work is popped now. - Keep track of staged rollable work item counts to speed up clone_available. - Make expiry on should_roll to 2/3 time instead of share duration since some hardware will have very fast share times. - Check that we'll get 1 shares' worth of work time by rolling before saying we should roll the work. - Simplify all those total_secs usages by initialising it to 1 second. - Overlap queued decrementing with staged incrementing. - Artificially set the pool lagging flag on pool switch in failover only mode as well. - Artificially set the pool lagging flag on work restart to avoid messages about slow pools after every longpoll. - Factor in opt_queue value into enough work queued or staged. - Roll work whenever we can on getwork. - Queue requests for getwork regardless and test whether we should send for a getwork from the getwork thread itself. - Get rid of age_work(). - Don't try to get bitforce temperature if we're polling for a result to minimise the chance of interleaved responses. - Fix harmless unused warnings in scrypt.h. - Check we are not lagging as well as there is enough work in getwork. BFGMiner Version 2.6.5 - August 20, 2012 - API new command 'coin' with mining information - Add message to share if it's a resubmit. 
- Add virtual adl mapping for when none is specified on the command line to not crash without a map specified. - Fix ADL gpu-map not working when there are more ADL devices than openCL. Patch supplied and tested by Nite69. - bitforce: Initial import of Linux-only bitforce-firmware-flash utility - Revert stale-on-arrival failsafe, since it ends up needing exceptions for everything - Bugfix: opencl: Declare opencl_dynamic_cleanup in header - Even if we want to submit stale shares, give up if we have more submissions waiting on threads (even before failing) - Even if we want to submit stale shares, give up if they've failed and we have more submissions waiting on threads - opencl: Use timeBeginPeriod on Windows to ensure gettimeofday has sufficient precision for dynamic intensity - Bugfix: opencl: Move ADL fanspeed warning messages to a new thread to get around summary-update deadlocking - README: Note that user groups don't get updated until re-login - Initialise cnt in libztex.c - Don't try to start devices that don't support scrypt when scrypt mining. - Repeating on timeout in ztex could make the code never return. - Offset libusb reads/writes by length written as well in ztex. - Cope with timeouts and partial reads in ztex code. - If there are more devices than nDevs, don't iterate over them as they may overwrite devices mapped below that with the mapping option. - Fix README faq on bfl auto-detect. - Set memory clock based on memdiff if present from with engine changes, allowing it to parallel manual changes from the menu as well. - api.c typo - API allow display/change failover-only setting - API-README corrections - miner.php documentation (in API-README) v0.1 - Bugfix: opencl: Show blank device-info statline area if GPU doesn't have ADL, to fix column alignment - README: Document usage of 0 to indicate "leave at default" for comma- delimited GPU options - Correct API-README versions to match when BFGMiner included them - API-README update changelog - Minimise locking and unlocking when getting counts by reusing shared mutex lock functions. - Avoid getting more work if by the time the getwork thread is spawned we find ourselves with enough work. - The bitforce buffer is cleared and hw error count incremented on return from a failed send_work already so no need to do it within the send_work function. - Don't make mandatory work and its clones last forever. - modminer: Log debug info for nonces found BFGMiner Version 2.6.4 - August 11, 2012 - Bugfix: Define my_cancellable_getch in miner.h - Escape " and \ when writing json config file - miner.php allow a custom page section to select all fields with '*' - e.g. to create a STATS section on a custom page - miner.php optional single rig totals (on by default) - Bugfix: Initialize submitting mutex - Bugfix: bitforce: Allocate enough space for FTDI description pointers - Queue one request for each staged request removed, keeping the staged request count optimal at all times. 
- Bugfix: Avoid cancelling threads while locks are held - Set recognizable names on threads for debugging - Bugfix: Don't keep making new get_work threads if all pools are dead - Enable configuring submission thread limit with --submit-threads option - Bugfix: Limit active submission threads to 0x40 so we don't overflow - Bugfix: Properly handle switching to pools that aren't on the latest block, and warn if a pool actively switches to an old block - Log more details of reasons in stale_work debug messages - Failsafe against stale-on-arrival work: disable the pool - Bugfix: Debug message should show "Work stale due to work restart" when it's not a share - windows-build: Remove APP SDK section since it is no longer needed - modminer: HACK: Let last_work handle the end of the work, and start the next one immediately - Bugfix: modminer: Remove erroneous "else" statement, to fix hashrate reporting - README: Document user group required for FPGAs on Gentoo and Ubuntu - BFGMiner-specific README adjustments - Bugfix: opencl: Ignore error getting device ids from platforms unless they are explicitly chosen - New --debuglog option to include debug info in stderr logfile even if not in the console - Bumped down debhelper compatibility reqs so that this will build on Lucid. - Updated to match packaging changes. - Switched to native packages so we don't have to muck around creating fake upstream tarballs, and can easily generate minor versions for upload to Launchpad. - Removed accidentally included debugging line. - Minor version bump again because of launchpad. Will sort this out for next release. - Updated to patch bitforce module issue on Debian/Ubuntu. - Added local quilt config dir to ignore. - modminer: Check nonce against previous work, in case of race - Bugfix: Enable --kernel-path option if ModMiner or Ztex is enabled (even if no OpenCL) - Bugfix: Escape backslashes and double-quotes in strings that rightfully may have them, when writing JSON config file - Clean object (.o) and dependency (.d) files out of source tree - Bugfix: bitforce: Don't count hashes that never happened due to throttling - Bugfix: Deal with serial_open timeout maximum (25.5s) - - fpgautils: Linux only supports uint8_t decisecond values for timeouts, so use uint8_t for timeout value; this gets smart compilers to throw warnings when overflowed in some cases - - bitforce: Reduce serial timeout to 25 seconds (was 30) and increase job long timeout to 25 seconds (was 15) to handle throttling gracefully - modminer: Add debug info to API extra device stats - modminer: Raise clock speed when there's only good nonces for a while - modminer: Only print clock speed adjustments when they actually change - modminer: Increase tolerance for bad nonces to 2% - modminer: Reset bad-nonce ratio measurement when the clock speed changes - Bugfix: bitforce: Include the correct device id in "garbled response" warning - ADL: Add attribution and disclaimer to interfaces - Cleaned out refs to AMD SDKs. - Updated README about debian packaging, changelog with minor version bump to work around Launchpad reqs. - Updated changelog with Ubuntu release specific version, needed to build for multiple releases. Also stripped out ADL SDK stuff in the build rules. - Initial work to adjust debian packaging from cgminer. Should build correctly now with pbuilder/pdebuild, and include docs. - Adapt miner code to free ADL structures - Import free ADL interfaces - Include scrypt.h in Makefile. - Fix windows bitforce build. 
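The serial_open timeout ceiling noted above (25.5 seconds) follows from the termios interface: read timeouts are expressed in tenths of a second in the 8-bit c_cc[VTIME] field, so 255 deciseconds is the maximum. A tiny sketch with an illustrative name:

    #include <stdint.h>
    #include <termios.h>

    static void example_set_serial_timeout(struct termios *tio, uint8_t deciseconds)
    {
        tio->c_cc[VMIN] = 0;             /* return on data or timeout */
        tio->c_cc[VTIME] = deciseconds;  /* 1..255 tenths of a second */
    }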
- Convert the serial autodetect functions to use int instead of char to enumerate devices. - Uglify windows autodetect code for BFL. - There is no point zeroing temperature in BFL if we fail to get a response, and we should register it as a HW error, suggesting throttling. - Update SCRYPT README with information about HW errors. - Use the scrypt CPU code to confirm results from OCL code, and mark failures as HW errors, making it easier to tune scrypt parameters. - We may as well leave one curl still available per pool instead of reaping the last one. - Display reaped debug message outside mutex lock to avoid recursive locking. - api.c update API start message and include port number - miner.php ignore arg when readonly - miner.php allow pool inputs: delete, addpool, poolpriority - bitforce: Reopen on communication error - Bugfix: Calculate hw err percent for the affected FPGA only - make-release: Adapt to new autogen by using NOCONFIGURE var BFGMiner Version 2.6.3 - August 6, 2012 - modminer: Relax no-nonces downclocking condition to be more reasonable - README: Update scrypt configure option - README: Update configure options - Bugfix: Display --disable-modminer in configure --help now that it is enabled by default - Add specific information when ADL detects error -10 saying the device is not enabled. - modminer: Shorten upload warning message to fit better - modminer: Sending a "ping" first, to workaround bug in new firmware betas - modminer: Include Hardware Errors and Valid Nonces in extra device status - Bugfix: modminer: Calculate bad-nonce percentage based only on the same FPGA's hardware errors, accurately - modminer: Show bitstream upload progress in statline, and only report to log every 10% - modminer: Be more verbose about why the clock is getting reduced - Document how Icarus golden nonce is handled by other FPGAs - Rewrite should_run for sched, to properly handle one-shot schedules spanning midnight - Bugfix: Check list_empty in pop_curl_entry after condition wait - Bugfix: Only add new pools to array after completing basic structure initialization - If __BFGMINER_SEGFAULT_ERRQUIT is set in the environment, segfault on non-zero quit()s - Check against NULL pointers getting into curlring - modminer: Finish a process results run with a nonce poll, rather than sleep - modminer: Workaround Windows driver failures - Count likely throttling episodes on bitforce devices as hardware errors. - Bugfix: bitforce: Increase serial read timeout to 30 seconds during actual mining, to tolerate more throttling - Style cleanups. - Make pool_disabled the first in the enums == 0, fixing the pool enabled count which compares if value is not enabled before enabling it. - Correct writing of scrypt parameters to config file based on command line parameters only. - Add scrypt support while writing conf - Use different variables for command line specified lookup gap and thread concurrency to differentiate user defined versus auto chosen values. - Queue a request on pool switch in case we have no work from the new pool yet. - API remove unused warning in non-GPU compile - api.c in linux allow to open a closed socket in TIME_WAIT - Display failover only mode in pool menu and allow it to be toggled live. - Reinstate check for system queueing lag when the current pool's queue is maxed out, there is no staged work, and the work is needed now. - Fix harmless warnings. - Check the current staged and global queued as well before queueing requests. Discard stales before ageing work in the watchdog thread. 
Queue requests after discarding and ageing work in watchdog thread. Display accurate global queued in curses output. Reuse variable in age_work(). - The queueing mechanism has become a complex state machine that is no longer predictable. Rewrite it from scratch watching only current queues in flight and staged work available on a pool by pool basis. - Update debian package configs to v2.6.2 - Queue an extra request whenever staged work drops below mining thread count in hash_pop. - Bugfix: Initialize logwin to 1 line high temporarily, to avert PDCurses crash - Enable FPGA support by default, as long as their dependencies are met - Bugfix: modminer: Search for *ModMiner* in udev ID_MODEL - make-release: build with --enable-scrypt - miner.php support custom report section joins - ICA default fpga_count to work_division if specified - FPGA-README document new hidden --icarus-options - ICA support 57600 baud rate, up to 8 FPGA and partial working FPGA boards - Scrypt mining does not support block testing yet so don't try to print it. - Clear the bitforce buffer whenever we get an unexpected result as it has likely throttled and we are getting cached responses out of order, and use the temperature monitoring as a kind of watchdog to flush unexpected results. - It is not critical getting the temperature response in bitforce so don't mandatorily wait on the mutex lock. - Check there is a cutoff temp actually set in bitforce before using it as a cut off value otherwise it may think it's set to zero degrees. - We dropped the temporary stopping of curl recruiting on submit_fail by mistake, reinstate it. - Make threads report in either side of the scanhash function in case we miss reporting in when restarting work. - Add debugging output when work is found stale as to why. - Print the 3 parameters that are passed to applog for a debug line in bitforce.c - Clear bitforce buffer on init as previously. - Add some headroom to the number of curls available per pool to allow for longpoll and sendwork curls. - Show the correct base units on GPU summary. - Bugfix: bitforce: 1 decisecond timeout is unreasonably short, give it a second - Bugfix: Don't try to log abandon time, since we aren't keeping track reasonably - Import uthash 1.9.6 - Bugfix: bitforce: Pause after send_work failures - Fix comm error handling to not consider work restarts an error condition - comm error bug fix - Bugfix: No endian.h on Windows - Remove unused mkinstalldirs - Display scrypt as being built in as well. - Fix build warning about KL_SCRYPT when built without scrypt support. - News update. - More scrypt intensity information. - Minor readme updates. - Update README with more build instructions. - Remove the low hash count determinant of hardware being sick. A low hash rate can be for poor network connectivity or scrypt mining, neither of which are due to sick hardware. - Style - API-README poolpriority changes - api.c verify poolpriority parameters before changing pools - api.c poolpriority changes - Implement shared swap32(yes|tole|tobe) function to handle endian flipping 32-bit chunks in blocks - Use correct macros for endian handling code BFGMiner Version 2.6.1 - July 29, 2012 - Autoselect --scrypt iff all pools send scrypt work - Adapt SCRYPT-README to BFGMiner (directing Bitcoin donations the correct direction to reach Con) - Remove mentions of Litecoin specifically - Bugfix: Fix build without OpenCL but with scrypt - make-release: Add SCRYPT-README - Bump version 2.6.0, adding SCRYPT README to makefile. 
- Smarter autogen.sh script. - Sleeping on intensity decrease is broken, remove it. - Sleep only the extra amount of time we overran the dynamic interval in dynamic mode. - Add scrypt documentation in the form of a separate readme. - Fix build error without scrypt enabled. - Limit thread concurrency for scrypt to 5xshaders if shaders is specified. - Simplify repeated use of gpus[gpu]. in ocl.c - Find the nearest power of 2 maximum alloc size for the scrypt buffer that can successfully be allocated and is large enough to accomodate the thread concurrency chosen, thus mapping it to an intensity. - Don't make opt_scrypt mandatory blocking with opencl code. - Update kernel versions reflecting changes in the API. - Make the thread concurrency and lookup gap options hidden on the command line and autotune parameters with a newly parsed --shaders option. - Fix target testing with scrypt kernel as it would have been missing shares below target. - Always create the largest possible padbuffer for scrypt kernels even if not needed for thread_concurrency, giving us some headroom for intensity levels. - Use the detected maximum allocable memory on a GPU to determine the optimal scrypt settings when lookup_gap and thread_concurrency parameters are not given. - Check the maximum allocable memory size per opencl device. - Add debugging output if buffer allocation fails for scrypt and round up bufsize to a multiple of 256. - Nonce testing for btc got screwed up, leading to no accepted shares. Fix it. - Display size of scrypt buffer used in debug. - Allow intensities up to 20 if scrypt is compiled in. - Add name to scrypt kernel copyright. - Allow lookup gap and thread concurrency to be passed per device and store details in kernel binary filename. - Ignore negative intensities for scrypt. - Change the scale of intensity for scrypt kernel and fix a build warning. - Correct target value passed to scrypt kernel. - Use 256 output slots for kernels to allow 1 for each worksize. - Test the target in the actual scrypt kernel itself saving further calculations. - Reinstate GPU only opencl device detection. - Decrease lookup gap to 1. Does not seem to help in any way being 2. - Fix build. - Make pad0 and pad1 local variable in scrypt kernel. - Constify input variable in scrypt kernel. - Send correct values to scrypt kernel to get it finally working. - Create command queue before compiling program in opencl. - Fix external scrypt algo missing. - Limit scrypt to 1 vector. - Handle KL_SCRYPT in config write. - Get rid of stuff. - Don't enqueuewrite buffer at all for pad8 and pass work details around for scrypt in dev_blk. - Set the correct data for cldata and prepare for pad8 fixes. - Get rid of spaces in arrays in scrypt kernel. - Start with smaller amount of hashes in cpu mining to enable scrypt to return today sometime. - Free the scratchbuf memory allocated in scrypt and don't check if CPUs are sick since they can't be. Prepare for khash hash rates in display. - Add cpumining capability for scrypt. - Set scrypt settings and buffer size in ocl.c code to be future modifiable. - Cope with when we cannot set intensity low enough to meet dynamic interval by inducing a forced sleep. - Make dynamic and scrypt opencl calls blocking. - Fix nonce submission code for scrypt. - Make sure goffset is set for scrypt and drop padbuffer8 to something manageable for now. - Set up buffer8 for scrypt. - Build fix for opt scrypt. - Don't check postcalc nonce with sha256 in scrypt. 
- Don't test nonce with sha and various fixes for scrypt.
- Make scrypt buffers and midstate compatible.
- Use specific output array entries in scrypt kernel.
- Provide initial support for the scrypt kernel to compile with and mine scrypt with the --scrypt option.
- Enable completely compiling scrypt out.
- Begin import of scrypt opencl kernel from reaper.

BFGMiner Version 2.5.3 - July 29, 2012

- Bugfix: Add zlib1.dll to Win32 release archive
- Bugfix: SICK low-hashrate is now determined by being under 1/3 the runtime average hashrate
- Bugfix: cpu_set_t is never #defined, so use CPU_ZERO which is a macro

BFGMiner Version 2.5.2 - July 29, 2012

- Limit total number of curls recruited per pool to the number of mining threads to prevent blasting the network when we only have one pool to talk to.
- Bugfix: Skip writing configuration of range-limited int options with negative values
- Bugfix: Correctly attempt to load ~/.bfgminer/bfgminer.conf or ~/.cgminer/cgminer.conf as defaults
- Send X-Minimum-Wait header on longpolls, to explicitly inform pools we will handle a response with no delay
- bitforce: Abandon (only) stale searches for work restarts
- Keep a counter of enabled pools and use that instead of iterating over the pool list. Use that value to ensure we don't set the last remaining active pool to the rejecting state.
- bitforce: Skip out of sending work if work restart requested
- RPC: Writeup on poolpriority command usage
- Bugfix: API: Report errors from poolpriority command
- RPC: New "poolpriority" command to set the order of pool priorities
- strtok_ts: Thread-safe strtok that works on POSIX or Windows
- Bugfix: Suppress "caught up" event when first switching to a pool
- Announce and restart work immediately when current pool has caught up to the current block
- Bugfix: Don't consider work stale due to other pools' longpolls, if --failover-only is active
- Refactor stale_work function to only flag actual stale shares
- stale_work: Don't factor getwork delay into expiry for shares (only for work itself)
- Bugfix: Use pool number rather than numeric pointer to struct pool, in block found notice
- Accept JSON Numbers in config file parameters
- Improve readability of OPT_HASARG in parse_config
- Allow JSON false as a valid value for strictly boolean options
- Include scan-serial in example configuration file
- fpgautils: add support for 57.6 kBd serial
- miner.php add a socket RCV timeout for if cgminer is hung and the API thread is still running
- BFL force all code to timeout to avoid hanging
- Initialise mdplatform.
- Find the gpu platform with the most devices and use that if no platform option is passed.
- Bugfix: It is not a hardware error if nonces returned from modminer don't meet the pool target
- bitforce & icarus: Log detection failures at debug log level, so we don't confuse users who have different devices (which is why these drivers are failing detection!)
- Show "WAIT" (LIFE_WAIT status) if a cgpu is idle waiting for work (pool slow/dead) - Instead of quitting on failing N retries, just discard the share - Bugfix: Don't discard stale shares after submission failure, if user or pool wants stales submitted - Bugfix: Record discard-during-retry shares in the sharelog - Bugfix: Only show Algorithm in RPC summary if CPU mining is actually active - OpenCL: Remove intensity from statline, since it overflowed - Move "Q" (requested getworks) to second status line as "GW" to balance out better - Bugfix: Use a mutex to control non-curses output - Simplify code to a single vprintf path for curses-less printing - Move opt_quiet check to my_log_curses, so it works for curses-less builds - Use log_generic for vapplog to cut down on code duplication - Bugfix: Copy argv[0] given to dirname() - Find the gpu platform with the most devices and use that if no platform option is passed. - Allow more platforms to be probed if first does not return GPUs. - Detach pthread from within the api thread in case it is terminated due to not being instantiated before pthread_cancel is called from main, leading to a segfault. - Debug output per thread hashrate is out by a factor of 1000. - Don't check if CPUs are sick since they can't be. - Calculate midstate in separate function and remove likely/unlikely macros since they're dependent on pools, not code design. - Display in debug mode when we're making the midstate locally. - Bugfix: Document --no-adl and --gpu-platform - Bugfix: Remove redundant documentation of --auto-fan and --auto-gpu (they are in GPU-specific options) - CPU mining may not be included in binaries, but it's not deprecated for BFGMiner either - Bugfix: Restore case-insensitivity to input - Scroll the device list with up/down arrow keys, if it is overflowed - Use select statement to handle input - Bugfix: Actually check that the device fits in the individual summary window before trying to print it - Bugfix: Fix build without curses but with OpenCL - Bugfix: Don't show a Temperature key if it isn't known - BFGMiner-specific NEWS fix BFGMiner Version 2.5.1 - July 13, 2012 - Replace CPU Algo in header with runtime - Bugfix: Calculate diff-1 utility to fix utility-hashrate on pools with diff!=1 - Add utility hashrate to curses display - Show units in kh, Gh, Th, etc as needed to use at most 3 integer digits - Use FTD2XX.DLL on Windows to autodetect BitFORCE SHA256 devices - bitforce_get_result returns -1 on error now. - Check return value of read in BFgets - Bugfix: modminer: Count hashes done before work restart - Bugfix: modminer: Adapt "get nonce" error condition to new scanhash=>-1 error API - Bugfix: Make our Windows nanosleep/sleep replacements standards-compliant (which fixes nmsleep) and include compat.h for bitforce (for sleep) - miner.php fix rig # when miners fail - Fix whitespace mangling. - bitforce: Use "full work" vs "nonce range" for kernel name - Abbrv. correction - Remove superfluous ave_wait - Put kname change for broken nonce-range back in - Add average wait time to api stats - Revert "Merge branch 'ave_time' of https://github.com/pshep/cgminer.git" - Add average return time to api stats - Missed one nonce-range disabling. - Change timeouts to time-vals for accuracy. - More BFL tweaks. Add delay between closing and reopening port. Remove buffer clear in re-init Add kernel type (mini-rig or single) - Revert "Change BFL driver thread initialising to a constant 100ms delay between devices instead of a random arrangement." 
- Only try to shut down work cleanly if we've successfully connected and started mining.
- Fix spelling.
- modminer: Firmware returns 0xffffff00 immediately if we set clockspeed too high
- Bugfix: modminer: Actually count good shares
- Bugfix: Adapt OpenCL scanhash errors to driver API change (errors are now -1, not 0)
- Remove bitforce_thread_init. The delay thing does nothing useful... when long poll comes around, all threads restart at the same time anyway.
- fix API support for big endian machines
- Bugfix: Use const struct device_api* for mt_disable
- modminer: Show progress of bitstream upload
- Bugfix: Don't declare devices SICK if they're just busy initializing
- Bugfix: Calculate nsec in nmsleep correctly
- miner.php allow rig names in number buttons
- Change BFL driver thread initialising to a constant 100ms delay between devices instead of a random arrangement.
- Spelling typo.
- Time opencl work from start of queueing a kernel till it's flushed when calculating dynamic intensity.
- Modify the scanhash API to use an int64_t and return -1 on error, allowing zero to be a valid return value.
- Check for work restart after the hashmeter is invoked for we lose the hashes otherwise contributed in the count.
- Remove disabled: label from mining thread function, using a separate mt_disable function.
- Style changes.
- Cope with signals interrupting the nanosleep of nmsleep.
- Use standard cfsetispeed/cfsetospeed to set baud rate on *nix
- miner.php split() flagged deprecated in PHP 5.3.0
- Bugfix: Use nmsleep instead of restart_wait, so we always wait the full time
- Make long timeout 10 seconds on bitforce for when usleep or nanosleep just can't be accurate...

BFGMiner Version 2.5.0 - July 7, 2012

- Fix BitFORCE driver to not silently discard valid shares (bug introduced by CGMiner merges)
- Fix --benchmark not working since the dynamic addition of pools and pool stats.
- Make disabling BFL nonce range support a warning since it has to be explicitly enabled on the command line now.
- miner.php allow renaming table headers
- Make bitforce nonce range support a command line option --bfl-range since enabling it decreases hashrate by 1%.
- Add sanity checking to make sure we don't make sleep_ms less than 0 in bitforce.
- The fastest minirig devices need a significantly smaller starting sleep time.
- Use a much shorter initial sleep time to account for faster devices and nonce range working, and increase it if nonce range fails to work.
- Use nmsleep instead of usleep in bitforce.
- Provide a ms based sleep function that uses nanosleep to avoid the inaccuracy of usleep on SMP systems.
- delay_time_ms is always set so need not be initialised in bitforce.
- Increase bitforce timeout to 10 seconds.
- Add more hysteresis and poll ~5 times to allow for timer delays in bitforce devices.
- miner.php allow alternating line colours (off by default)
- Display the actual duration of wait when it is greater than the cutoff.
- Set nonce to maximum once we determine nonce range support is broken.
- Initial wait time is always known so no need to zero it beforehand in bitforce.
- No point counting wait time until the work is actually sent to bitforce devices.
- Use string comparison functions instead of explicit comparisons.
- Account for wait_ms time when nonce_range is in use on BFL.
- Split nonces up into 1/5 chunks when nonce range is supported.
- limit clear buffer iterations.
- Add fd check to clear buffer.
- miner.php remove incorrect 'DATE' error message
- miner.php allow summary header in custom pages
- Disable nonce range support in BFL when broken support is detected.
- Restart_wait is only called with a ms value so incorporate that into the function.
- Only try to adjust dev width when curses is built in.
- miner.php define custom sum fields as a simple array
- Fix off-by-one error in nonce increment in bfl.
- Use BE when setting nonce in bitforce nonce range work.
- Enable nonce range in the normal init sequence for bfl.
- Queue extra work at 2/3 differently depending on whether we're using nonce range or not.
- Initially enable support for nonce range support on bfl, splitting nonces up into 3/4 size and only disable it if it fails on work submit.
- Attempt to detect nonce range support in BFL by sending work requiring its support.
- Limit retrying on busy for up to BITFORCE_TIMEOUT_MS
- Attempt to initialise while bitforce device returns BUSY.
- Extend length of string that can be passed to BFL devices.
- Fix signedness warning.
- Adjust device width column to be consistent.
- Use cgpu-> not gpus[] in watchdog thread.
- Add api stats (sleep time)
- Timing tweaks: Added long and short timeouts, short for detecting throttling, long to give up totally. Reset sleep time when device re-initialised. Still check results after timeout. Back up a larger time if result on first poll.
- Add API Notify counter 'Comms Error'
- Style police on api.c
- Do all logging outside of the bitforce mutex locking to avoid deadlocks.
- Remove applog call from bfwrite to prevent grabbing nested mutexes.
- Bitforce style changes.
- Minor style changes.
- Remove needless roundl define.
- Made JSON error message verbose.
- Fine-tune timing adjustment. Also remove old work_restart timing.
- Check for gpu return times of >= 0, not just 0, to fix intensity dropping to -10.
- Restart is zeroed in the mining thread so no need to do it inside the bitforce code.
- More improvements to comms. BFL returns nothing when throttling, so this should not be considered an error. Instead repeat with a longer delay.
- Polling every 10ms there's not much point checking the pthread_cond_timedwait as it just adds overhead. Simply check the value of work_restart in the bfl main polling loop.
- Use a pthread conditional that is broadcast whenever work restarts are required. Create a generic wait function waiting a specified time on that conditional that returns if the condition is met or a specified time passed to it has elapsed. Use this to do smarter polling in bitforce to abort work, queue more work, and check for results to minimise time spent working needlessly.
- Add busy time to wait time.
- api.c put version up to 1.14
- Add tiny delay after writing to BFL. Change BFL errors to something more human readable. Send work busy re-tries after 10ms delay.
- Fix race condition in thread creation that could under some conditions crash BFGMiner at startup

BFGMiner Version 2.4.4 - July 1, 2012

- Fix builds on non-GNU platforms.
- api.c ensure old mode is always available when not using --api-groups + quit() on param errors
- Implement rudimentary X-Mining-Hashrate support.
- Detect large swings in temperature when below the target temperature range and change fan by amounts dependent on the value of tdiff.
- Adjust the fanspeed by the magnitude of the temperature difference when in the optimal range.
- Revert "Restarting cgminer from within after ADL has been corrupted only leads to a crash. Display a warning only and disable fanspeed monitoring."
- api.c fix json already closed - implement and document API option --api-groups - Put upper bounds to under 2 hours that work can be rolled into the future for bitcoind will deem it invalid beyond that. - define API option --api-groups - api.c allow unwell devices to be enabled so they can be cured - miner.php - fix/enable autorefresh for custom pages - miner.php allow custom summary pages - new 'Mobile' summary - Work around pools that advertise very low expire= time inappropriately as this leads to many false positives for stale shares detected. - Only show ztex board count if any exist. - There is no need for work to be a union in struct workio_cmd - fpgautils.c include a debug message for all unknown open errors - Don't keep rolling work right up to the expire= cut off. Use 2/3 of the time between the scantime and the expiry as cutoff for reusing work. - Log a specific error when serial opens fail due to lack of user permissions - Increase GPU timing resolution to microsecond and add sanity check to ensure times are positive. - Opencl code may start executing before the clfinish order is given to it so get the start timing used for dynamic intensity from before the kernel is queued. - fpgautils.c - set BAUD rate according to termio spec - fpgautils.c - linux ordering back to the correct way - miner.php remove unneeded '.'s - miner.php add auto refresh options - miner.php add 'restart' next to 'quit' - miner.php make fontname/size configurable with myminer.php - Make the pools array a dynamically allocated array to allow unlimited pools to be added. - Make the devices array a dynamically allocated array of pointers to allow unlimited devices. - Dynamic intensity for GPUs should be calculated on a per device basis. Clean up the code to only calculate it if required as well. - Bugfix: Provide alternative to JSON_ENCODE_ANY for Jansson 1.x - Use a queueing bool set under control_lock to prevent multiple calls to queue_request racing. - Use the work clone flag to determine if we should subtract it from the total queued variable and provide a subtract queued function to prevent looping over locked code. - Don't decrement staged extras count from longpoll work. - Count longpoll's contribution to the queue. - Increase queued count before pushing message. - Test we have enough work queued for pools with and without rolltime capability. - As work is sorted by age, we can discard the oldest work at regular intervals to keep only 1 of the newest work items per mining thread. - Roll work again after duplicating it to prevent duplicates on return to the clone function. - Abstract out work cloning and clone $mining_threads copies whenever a rollable work item is found and return a clone instead. - api.c display Pool Av in json - Take into account average getwork delay as a marker of pool communications when considering work stale. - Work out a rolling average getwork delay stored in pool_stats. - Getwork delay in stats should include retries for each getwork call. - Walk through the thread list instead of searching for them when disabling threads for dynamic mode. - Extend nrolltime to support the expiry= parameter. Do this by turning the rolltime bool into an integer set to the expiry time. If the pool supports rolltime but not expiry= then set the expiry time to the standard scantime. - When disabling fanspeed monitoring on adl failure, remove any twin GPU association. This could have been leading to hangs on machines with dual GPU cards when ADL failed. 
- modminer: Don't delay 2nd+ FPGAs during work restart - Disable OpenCL code when not available. - Fix openwrt crashing on regeneratehash() by making check_solve a noop. - Fix sign warning. - Bugfix: icarus: properly store/restore info and work end times across longpoll restarts - Enable modminer for release builds BFGMiner Version 2.4.3 - June 14, 2012 - Change device API "name" to reflect driver name abbreviation instead of device type name - miner.php allow a separate user settings file - modminer: Implement extended device stats to expose each Board to the RPC API - Bugfix: Use new cgpu->thr for longpoll waking - bitforce: Remove 4.5s delay before polling starts, since MiniRig finishes sooner - FPGA - allow device detect override without an open failure - Bugfix: Missing printf value in merge from cgminer - Ensure C compiler is in C99 mode - Add CPU core count detection for BSD/Mac - Set CPU mining idle priority on Windows - can_roll and should_roll should have no bearing on the cycle period within the miner_thread so remove it. - Check for strategy being changed to load balance when enabling LPs. - Check that all threads on the device that called get_work are waiting on getwork before considering the pool lagging. - Iterate over each thread belonging to each device in the hashmeter instead of searching for them now that they're a list. - When using rotate pool strategy, ensure we only select from alive enabled pools. - Start longpoll from every pool when load balance strategy is in use. - Add mandatory and block fields to the work struct. Flag any shares that are detected as blocks as mandatory to submit, along with longpoll work from a previously rejecting pool. - Consider the fan optimal if fanspeed is dropping but within the optimal speed window. - Fix typo in some API messages (succeess/success) - api.c MMQ stat bugs - Bugfix: Fix warnings when built without libudev support - Bugfix: slay a variety of warnings - Bugfix: modminer: Fix unsigned/signed comparison and similar warnings - API add ModMinerQuad support - Bugfix: Honour forceauto parameter in serial_detect functions - modminer: Temperature sensor improvements - modminer: Make log messages more consistent in format - Only adjust GPU speed up if the fanspeed is within the normal fanrange and hasn't been turned to maximum speed under overheat conditions. - ModMiner use valid .name - New driver: BTCFPGA ModMiner - Abstract generally useful FPGA code into fpgautils.c - API add stats for pool getworks - miner.php option to hide specific fields from the display - miner.php add version numbers to the summary page - Update debian configs to v2.4.2 - Add API and FPGA READMEs into Makefile to be included in source distribution. - Icarus - fix unit64_t printf warnings BFGMiner Version 2.4.2 - June 2, 2012 - Use epoll to immediately interrupt Icarus with new work on longpolls (Linux) - API.class compiled with Java SE 6.0_03 - works with Win7x64 - miner.php highlight devs too slow finding shares (possibly failing) - API update version to V1.11 and document changes - API save default config file if none specified - api.c save success incorrectly returns error - api.c replace BUFSIZ (linux/windows have different values) - Move RPC API content out of README to API-README - Open a longpoll connection if a pool is in the REJECTING state as it's the only way to re-enable it automatically. 
- Use only one longpoll as much as possible by using a pthread conditional broadcast that each longpoll thread waits on and checks if it's the current pool before
- If shares are known stale, don't use them to decide to disable a pool for sequential rejects.
- Restarting cgminer from within after ADL has been corrupted only leads to a crash. Display a warning only and disable fanspeed monitoring.
- Icarus: fix abort calculation/allow user specified abort
- Icarus: make --icarus-timing hidden and document it in FPGA-README
- Icarus: high accuracy timing and other bitstream speed support
- add-MIPSEB-to-icarus-for-BIG_ENDIAN
- work_decode only needs swab32 on midstate under BIG ENDIAN
- add compile command to api-example.c
- save config bugfix: writing an extra ',' when no gpus
- Add dpkg-source commits

BFGMiner Version 2.4.1 - May 6, 2012

- Icarus: Calibrate hashrate yet even more accurately
- In the unlikely event of finding a block, display the block solved count with the pool it came from for auditing.
- Display the device summary on exit even if a device has been disabled.
- Use correct pool enabled enums in api.c.
- Import Debian packaging configs
- Ensure we test for a pool recovering from idle so long as it's not set to disabled.
- Fix pool number display.
- Give BFGMiner -T message only if curses is in use.
- Reinit_adl is no longer used.
- API 'stats' allow devices to add their own stats also for testing/debug
- API add getwork stats to BFGMiner - accessible from API 'stats'
- Don't initialise variables to zero when in global scope since they're already initialised.
- Get rid of uninitialised variable warning when it's false.
- Move a pool to POOL_REJECTING to be disabled only after 3 minutes of continuous rejected shares.
- Some tweaks to reporting and logging.
- API support new pool status
- Add a temporarily disabled state for enabled pools called POOL_REJECTING and use the work from each longpoll to help determine when a rejecting pool has started working again. Switch pools based on the multipool strategy once a pool is re-enabled.
- Removing extra debug
- Fix the benchmark feature by bypassing the new networking code.
- Reset sequential reject counter after a pool is disabled for when it is re-enabled.
- ztex updateFreq was always reporting on fpga 0
- Trying harder to get 1.15y working
- Specifying threads on multi fpga boards extra cgpu
- Missing the add cgpu per extra fpga on 1.15y boards
- API add last share time to each pool
- Don't try to reap curls if benchmarking is enabled.

BFGMiner Version 2.4.0 - May 3, 2012

- Only show longpoll warning once when it has failed.
- Convert hashes to an unsigned long long as well.
- Detect pools that have issues represented by endless rejected shares and disable them, with a parameter to optionally disable this feature.
- Bugfix: Use a 64-bit type for hashes_done (miner_thread) since it can overflow 32-bit on some FPGAs
- Implement an older header fix for a label existing before the pthread_cleanup macro.
- Limit the number of curls we recruit on communication failures and with delaynet enabled to 5 by maintaining a per-pool curl count, and using a pthread conditional that wakes up when one is returned to the ring buffer.
- Generalise add_pool() functions since they're repeated in add_pool_details.
- Bugfix: Return failure, rather than quit, if BFwrite fails
- Disable failing devices such that the user can attempt to re-enable them
- Bugfix: thread_shutdown shouldn't try to free the device, since it's needed afterward
- API bool's and 1TBS fixes
- Icarus - minimise code delays and name timer variables
- api.c V1.9 add 'restart' + redesign 'quit' so thread exits cleanly
- api.c bug - remove extra ']'s in notify command
- Increase pool watch interval to 30 seconds.
- Reap curls that are unused for over a minute. This allows connections to be closed, thereby allowing the number of curl handles to always be the minimum necessary to not delay networking.
- Use the ringbuffer of curls from the same pool for submit as well as getwork threads. Since the curl handles were already connected to the same pool and are immediately available, share submission will not be delayed by getworks.
- Implement a scalable networking framework designed to cope with any sized network requirements, yet minimise the number of connections being reopened. Do this by creating a ring buffer linked list of curl handles to be used by getwork, recruiting extra handles when none is immediately available.
- There is no need for the submit and getwork curls to be tied to the pool struct.
- Do not recruit extra connection threads if there have been connection errors to the pool in question.
- We should not retry submitting shares indefinitely or we may end up with a huge backlog during network outages, so discard stale shares if we failed to submit them and they've become stale in the interim.

BFGMiner Version 2.3.6 - April 29, 2012

- Shorten stale share messages slightly.
- Protect the freeing of current_hash under mutex_lock to prevent racing on it when set_curblock is hit concurrently.
- Change default behaviour to submitting stale, removing the --submit-stale option and adding a --no-submit-stale option.
- Make sure to start the getwork and submit threads when a pool is added on the fly. This fixes a crash when a pool is added to running BFGMiner and then switched to.
- Faster hardware can easily outstrip the speed we can get work and submit shares when using only one connection per pool.
- Test the queued list to see if any get/submits are already queued and if they are, start recruiting extra connections by generating new threads.
- This allows us to reuse network connections at low loads but recruit new open connections as they're needed, so that BFGMiner can scale to hardware of any size.

BFGMiner Version 2.3.5 - April 28, 2012

- Restarting BFGMiner leads to a socket that can't be bound for 60 seconds, so increase the interval that API binding waits to 30 seconds to minimise the number of times it will retry, spamming the logs.
- Give a longpoll message for any longpoll that detects a block change, primary or backup, and also display which pool it was.
- Decrease utility display to one decimal place.
- Small cosmetic output alignment.
- Add pool number to stale share message.
- Add space to log output now that there is more screen real estate available.
- Indentation clean up.
- Remove thread id display from rejected shares as well.
- Merge pull request #185 from Diapolo/diakgcn - add goffset support for diakgcn with -v 1 and update kernel version
- Set have_longpoll to true when there is at least one pool with longpoll.
- Don't display the thread ID since it adds no useful information over the device number.
- Don't display the first 8 bytes of a share since they will always be zero at >= 1 difficulty.
- work->longpoll is reset across test_work_current so we need to recheck what pool it belongs to. - Use longpolls from backup pools with failover-only enabled just to check for block changes, but don't use them as work. - Start longpoll only after we have tried to extract the longpoll URL. - Check for submitold flag on resubmit of shares, and give different message for stale shares on retry. - Check for submitold before submitstale. - Don't force fresh curl connections on anything but longpoll threads. - Create one longpoll thread per pool, using backup pools for those pools that don't have longpoll. - Use the work created from the longpoll return only if we don't have failover-enabled, and only flag the work as a longpoll if it is the current pool. - This will work around the problem of trying to restart the single longpoll thread on pool changes that was leading to race conditions. - It will also have less work restarts from the multiple longpolls received from different pools. - Remove the invalid entries from the example configuration file. - Add support for latest ATI SDK on windows. - Export missing function from libztex. - miner.php change socktimeoutsec = 10 (it only waits once) - Bugfix: Make initial_args a const char** to satisfy exec argument type warning (on Windows only) - miner.php add a timeout so you don't sit and wait ... forever - Create discrete persistent submit and get work threads per pool, thus allowing all submitworks belonging to the same pool to reuse the same curl handle, and all getworks to reuse their own handle. - Use separate handles for submission to not make getwork potentially delay share submission which is time critical. - This will allow much more reusing of persistent connections instead of opening new ones which can flood routers. - This mandated a rework of the extra longpoll support (for when pools are switched) and this is managed by restarting longpoll cleanly and waiting for a thread join. - miner.php only show the current date header once - miner.php also add current time like single rig page - miner.php display rig 'when' table at top of the multi-rig summary page - README - add some Ztex details - api.c include zTex in the FPGA support list - api.c ensure 'devs' shows PGA's when only PGA code is compiled - miner.c sharelog code consistency and compile warning fix - README correct API version number - README spelling error - api.c combine all pairs of sprintfs() - api.c uncomment and use BLANK (and COMMA) - Code style cleanup - Annotating frequency changes with the changed from value - README clarification of 'notify' command - README update for API RPC 'devdetails' - api.c 'devdetails' list static details of devices - Using less heap space as my TP-Link seems to not handle this much BFGMiner Version 2.3.4 - April 26, 2012 - New maintainership of code with modular FPGA/GPU focus, under BFGMiner name - Complete working support for cross-compiling Windows builds on Linux. 
- Fix usage of low --scan-time settings so it doesn't busy-loop - JSON API: Add new 'devdetail' command to get fixed device information - JSON API: Implement driver abstraction for extra device status - Icarus: Use epoll to wait for serial port input properly, when available - Icarus: Workaround buggy USB-UART that causes Icarus to stop mining rarely - Icarus: Estimate mining hashrate correctly, calibrated from real-world data - Icarus: Parallelize work setup with Icarus hash search improving performance - Icarus: More reliable detection and runtime - OpenCL: Move GPU-specific data fetching from JSON API to OpenCL driver - OpenCL: Dynamically load OpenCL library, to be more vendor-independent and allow use without actually having OpenCL (i.e. FPGA-only rigs). CGMiner Version 2.3.4 - April 25, 2012 - Extensively document the cause of GPU device issues and the use of --gpu-map. - Support for share logging - Detect poorly performing combination of SDK and phatk kernel and add verbose warning at startup. - Icarus update to new add_cgpu() - Icarus driver working with Linux and Windows - api.c fix unused variable compile warning - Display all OpenCL devices when -n is called as well to allow debugging of differential mapping of OpenCL to ADL. - Add a --gpu-map option which will allow arbitrarily mapping ADL devices to OpenCL devices for instances where association by enumeration alone fails. - Increase upper limit on number of extra items to queue as some FPGA code can't yet reliably keep many devices busy. - Display configuration file information when -c option is passed and only when file exists on loading default config file. - Display configuration file loaded, if any, and debug output if configuration file parsing failed. - Add missing ztex header to Makefile for distribution. - Document long-form COM port device names on Windows, required to specify serial ports above 9 - Include ztex bitstreams firmware in distribution and install if configured in. - Style police on driver-ztex.c - work_restart should only be changed by cgminer.c now - Shut down the api cleanly when the api thread is cancelled. This should allow the api socket to be closed successfully to next be reopened with app_restart. - Make a union for cgpu device handles, and rename "device" to "device_ztex" since it's Ztex-specific - Initialise name variable. - Remove unnecessary check for variable that always has memory allocated. - Bugfix: Missing "break" no-op in default case - Make the status window and log window as large as can fit on startup, rechecking to see if it can be enlarged after the fact. This allows any number of devices to be displayed provided the window is made long enough without corrupting the output. - Style police on libztex.c. - API add removepool like the screen interface - api.c escape required characters in return strings + pools returns the username - Set lp_path to NULL after free for consistency. - Removing dmalloc import left behind by mistake - Fixing leak in resp_hdr_cb - miner.php warning highlight GPU stats if they are zero (e.g. ADL not enabled) - miner.php highlight any device that isn't 'Enabled' - miner.php highlight any Status that isn't 'Alive' - miner.php optionally support multiple rigs - Initial Ztex support 1.15x board. CGMiner Version 2.3.3 - April 15, 2012 - Don't even display that cpumining is disabled on ./configure to discourage people from enabling it. 
- Do a complete cgminer restart if the ATI Display Library fails, as it does on windows after running for some time, when fanspeed reporting fails.
- Cache the initial arguments passed to cgminer and implement an attempted restart option from the settings menu.
- Disable per-device status lines when there are more than 8 devices since screen output will be corrupted, enumerating them to the log output instead at startup.
- Reuse Vals[] array more than W[] till they're re-initialised on the second sha256 cycle in poclbm kernel.
- Minor variable alignment in poclbm kernel.
- Make sure to disable devices with any status not being DEV_ENABLED to ensure that thermal cutoff code works as it was setting the status to DEV_RECOVER.
- Re-initialising ADL simply made the driver fail since it is corruption over time within the windows driver that's responsible. Revert "Attempt to re-initialise ADL should a device that previously reported fanspeed stops reporting it."
- Microoptimise poclbm kernel by ordering Val variables according to usage frequency.

CGMiner Version 2.3.2 - March 31, 2012

- Damping small changes in hashrate so dramatically has the tendency to always make the hashrate underread so go back to gentle damping instead.
- Revert the crossover of variables from Vals to W in poclbm kernel now that Vals are the first declared variables so they're used more frequently.
- Vals variables appearing first in the array in poclbm is faster.
- Change the preferred vector width to 1 for Tahiti only, not all poclbm kernels.
- Use a time constant 0.63 for when large changes in hashrate are detected to damp change in case the large change is an aliasing artefact instead of a real change.
- Only increment stale counter if the detected stales are discarded.
- Attempt to re-initialise ADL should a device that previously reported fanspeed stops reporting it.
- Move the ADL setup and clearing to separate functions and provide a reinit_adl function to be used when adl fails while running.
- Use slightly more damping on the decay time function in the never-ending quest to smooth off the hashmeter.
- Set the starting fanspeed to a safe and fairly neutral 50% when autofan is enabled.
- Provide locking around updates of cgpu hashrates as well to prevent multiple threads accessing data fields on the same device.
- Display the beginning of the new block in verbose mode in the logs.
- Reinstate old diablo kernel variable ordering from 120222, adding only goffset and vector size hint. The massive variable ordering change only helped one SDK on
- Change the version number on the correct kernels.
- api.c devicecode/osinfo incorrectly swapped for json
- Add extensive instructions on how to make a native windows build.
- Update version numbers of poclbm and diablo kernels as their APIs have also changed.
- Use global offset parameter to diablo and poclbm kernel ONLY for 1 vector kernels.
- Use poclbm preferentially on Tahiti now regardless of SDK.
- Remove unused constant passed to poclbm.
- Clean up use of macros in poclbm and use bitselect everywhere possible.
- Add vector type hint to diablo kernel.
- Add worksize and vector attribute hints to the poclbm kernel.
- Spaces for non-aligned variables in poclbm.
- Swap Vals and W variables where they can overlap in poclbm.
- More tidying of poclbm.
- Tidy up first half of poclbm.
- Clean up use of any() by diablo and poclbm kernels.
- Minor variable symmetry changes in poclbm.
- Put additions on separate lines for consistency in poclbm.
- Consolidate last use of W11 into Vals4 in poclbm.
- Change email due to SPAM
- api.c miner.php add a '*' to the front of all notify counters - simplifies future support of new counters
- miner.php add display 'notify' command
- Small change to help arch's without processor affinity
- Fix bitforce compile error
- api.c notify should report disabled devices also - of course
- API returns the simple device history with the 'notify' command
- code changes for supporting a simple device history
- api.c Report an OS string in config to help with device issues
- api.c fix Log Interval - integer in JSON
- api.c config 'Device Code' to show list of compiled devices + README
- api.c increase buffer size close to current code allowable limit
- removed 8-component vector support from kernel, as this is not supported in CGMINER anyway
- forgot to update kernel modification date, fixed ;)
- reordered an addition in the kernel, which results in fewer instructions used in the GPU ISA code for GCN
- miner.php: option for readonly or check privileged access
- Ignore redundant-with-build options --disable-gpu, --no-adl, and --no-restart
- miner.php: ereg_replace is DEPRECATED so use preg_replace instead
- Make curses TUI support optional at compile-time.
- Bugfix: AC_ARG_WITH provides withval instead of enableval
- miner.php split devs output for different devices
- api.c: correct error messages
- icarus.c modify (regular) timeout warning to only be debug
- icarus.c set the windows TODO timeout
- Allow specifying a specific driver for --scan-serial
- optimized nonce-check and output code for -v 2 and -v 4
- Bugfix: Check for libudev header (not just library) in configure, and document optional dependency
- Add API support for Icarus and Bitforce
- Next API version is 1.4 (1.3 is current)
- README/api.c add "When" the request was processed to STATUS
- Bugfix: ZLX to read BitFORCE temp, not ZKX -.-
- Use libudev to autodetect BitFORCE devices, if available
- Use the return value of fan_autotune to set fan_optimal instead of passing it as a pointer.
- Pass the lasttemp from the device we're using to adjust fanspeed in twin devices.
- fix the name to 3 chars, fix the multi-icarus support
- Bugfix: "-S auto" is the default if no -S is specified, and there is no such delay in using it
- README add information missing from --scan-serial
- Update README RPC API Version comment
- Bugfix: Allow enabling CPU even without OpenCL support
- Change failed-to-mine number of requested shares message to avoid segfault on recursive calling of quit().
- Get rid of extra char which is just truncated in poclbm kernel.
- only small code formatting changes
- removed vec_step() as this could lead to errors on older SDKs
- unified code for generating nonce in kernel and moved addition of base to the end -> faster

CGMiner Version 2.3.1 - February 24, 2012

- Revert input and output code on diakgcn and phatk kernels to old style which worked better for older hardware and SDKs.
- Add a vector*worksize parameter passed to those kernels to avoid one op.
- Increase the speed of hashrate adaptation.
- Only send out extra longpoll requests if we want longpolls.
- API implement addpool command
- API return the untouched Total MH also (API now version 1.3)
- Add enable/disablepool to miner.php example and reduce font size 1pt

CGMiner Version 2.3.0 - February 23, 2012

- Consider extra longpoll work items as staged_extra so as to make sure we queue more work if queueing regular work items as longpolls.
- Use diablo kernel on all future SDKs for Tahiti and set preferred vector width to 1 on poclbm kernel only. - Explicitly type the constants in diakgcn kernel as uint, to be in line with poclbm kernel. - Reset all hash counters at the same time as resetting start times to get accurate hashrates on exiting which is mandatory for benchmarking. - Report thread out before it starts to avoid being flagged as sick when waiting for the first work item. - Don't disable and re-enable devices as they may recover and in the meantime have their status set to OFF. - API new commands enablepool and disablepool (version already incremented) - Tolerate new-format temperature readings for bitforce - Modify cgminer.c pool control to allow API to call it - Bugfix: Fix BitFORCE driver memory leak in debug logging - Extra byte was being unused in poclbm leading to failure on some platforms. - Explicitly type the constants in poclbm kernel as uint. - Don't save 'include' when saving the configuration - Allow configuration file to include another recursively - Use the SDK and hardware information to choose good performing default kernels. - Move phatk kernel to offset vector based nonce bases as well. - Add a --benchmark feature which works on a fake item indefinitely to compare device performance without any server or networking influence. - Allow writing of multiple worksizes to the configuration file. - Allow writing of multiple vector sizes to the configuration file. - Allow writing of multiple kernels to the configuration file. - Allow multiple different kernels to be chosen per device. - Allow the worksize to be set per-device. - Allow different vectors to be set per device. - If we're well below the target temperature, increase gpu engine speed back to maximum in case we have gotten lost between profiles during an idle period. - We should be setting the value of fan_optimal, not its address. - As all kernels will be new versions it's an opportunity to change the .bin format and make it simpler. Specifying bitalign is redundant and long can be l. - Use any() in kernel output code. - Put the nonce for each vector offset in advance, avoiding one extra addition in the kernel. - Reset times after all mining threads are started to make estimating hashrates easier at startup. - Bugfix: allow no-exec (NX) stack - Fix minor warning. - fix the bitforce.c code style follow 1TBS - fix icarus.c compile warning - small changes to speedup no vec for AMD 898.1 OCL runtime - Update licensing to GPL V3. - Reset the longpoll flag after it's been used once to prevent it restarting work again. - Begin import of DiabloMiner kernel. - Modify API debug messages to say API instead of DBG - When API shuts down cgminer don't kill itself - Don't make rolled work from the longpoll be seen as other longpoll work items. - API add 'privileged' command so can verify access level - Set the lp_sent variable under lock since there will almost always be a race on setting this variable, potentially leading to multiple LPs being sent out. - API restrict access to all non display commands by default - Update API version to 1.2 for new 'Log Interval' - API add --log Interval to 'config' reply - --api-allow special case 0/0 means all CGMiner Version 2.2.7 - February 20, 2012 - Send out extra longpolls when we have switched pools and the longpoll thread is still bound to the old one. This is particularly useful with p2pool where longpolls do not correlate with main bitcoin block change and would have led to high reject rates on failover. 
- Store whether a work item is the result of a longpoll or not in struct work and use it to help determine block changes directly from the work longpoll bool. - Keep track of when a longpoll has been sent for a pool and if the current pool is requesting work but has not sent a longpoll request, convert one of the work items to a longpoll. - Store the longpoll url in the pool struct and update it from the pool_active test in case it changes. This is to allow further changes to longpoll management on switching pools. - Re-check for a longpoll supporting pool every 30 seconds if none is found initially. - Report threads as busy waiting on getwork on startup to avoid them being flagged sick on startup during slow networking. - Allow devices that are disabled due to overheating to be flagged as recovering instead of disabling them and re-enable them if they're below ideal temperatures - Tahiti prefers worksize 64 with poclbm. - No need to expressly retain the opencl program now that the zero binary issue is fixed. This actually fixes cgminer to work with the latest SDK included with the ATI catalyst driver 12.2. - Show error code on any opencl failure status. - Add detection for version 898.1 SDK as well but only give SDK 2.6 warning once on startup instead of with each device initialisation. - Always use a fresh connection for longpoll as prolonged persistent connections can fail for many reasons. - Keep track of intended engine clock speed and only adjust up if it's higher than the last intended speed. This avoids setting the clock speed to one relative to a lower profile one by mistake. - Use gpu-memdiff on startup if an engine clockspeed is set and a memdiff value is set. - Revert "Adjust engine speed up according to performance level engine setting, not the current engine speed." - ineffectual. - Freeze the queues on all threads that are sent the pause message to prevent them trying to start up again with saved pings in their queues. - Updates to diakgcn kernel/ - Consolidate all screen updates to the watchdog thread and touch both windows before refresh. - Curses will be disabled in clean_up so don't do it early in kill_work, and disable_adl so that GPU settings may be restored to normal in case shutting down curses leads to instability on windows. - Stop the mining threads before trying to kill them. - Plain refresh() does not give reliably screen updates so get rid of all uses of it. - First release with working diakgcn kernel. CGMiner Version 2.2.6 - February 16, 2012 - Provide warning on each startup about sdk 2.6 - Fix unused warnings on win32. - bitforce: Simplify BFopen WIN32 ifdef/else - Fix initialization warning with jansson 1.3 - bitforce: Cleanup extraneous TODO that isn't needed - Move tcsetattr (and new tcflush) into *nix BFopen to simplify things a bit - Add message explaining 2nd thread disabling for dynamic mode and how to tune it. - Move logwindow down once number of devices is known. - Automatically choose phatk kernel for bitalign non-gcn ATI cards, and then only select poclbm if SDK2.6 is detected. - Allow the refresh interval to be adjusted in dynamic intensity with a --gpu-dyninterval parameter. - Make curses display visible right from the beginning and fix the window sizes so the initial messages don't get lost once the status window is drawn. 
- The amount of work scanned can fluctuate when intensity changes and since we do this one cycle behind, we increment the work more than enough to prevent repeati - bitforce: Set a 30 second timeout for serial port on Windows, since the default is undefined - Use PreVal4addT1 instead of PreVal4 in poclbm kernel. - Import PreVal4 and PreVal0 into poclbm kernel. - Import more prepared constants into poclbm kernel. - Keep variables in one array but use Vals[] name for consistency with other kernel designs. - Replace constants that are mandatorily added in poclbm kernel with one value. - Remove addition of final constant before testing for result in poclbm kernel. - Hand optimise variable addition order. - Hand optimise first variable declaration order in poclbm kernel. - Radical reordering machine based first pass to change variables as late as possible, bringing their usage close together. - fix strcpy NULL pointer if env HOME unset. - bitforce: Disable automatic scanning when at least one device is specified manually - Unroll all poclbm additions to enable further optimisations. CGMiner Version 2.2.5 - February 13, 2012 - Make output buffer write only as per Diapolo's suggestion. - Constify nonce in poclbm. - Use local and group id on poclbm kernel as well. - Microoptimise phatk kernel on return code. - Adjust engine speed up according to performance level engine setting, not the current engine speed. - Try to load a binary if we've defaulted to the poclbm kernel on SDK2.6 - Use the poclbm kernel on SDK2.6 with bitalign devices only if there is no binary available. - Further generic microoptimisations to poclbm kernel. - The longstanding generation of a zero sized binary appears to be due to the OpenCL library putting the binary in a RANDOM SLOT amongst 4 possible binary locations. Iterate over each of them after building from source till the real binary is found and use that. - Fix harmless warnings with -Wsign-compare to allow cgminer to build with -W. - Fix missing field initialisers warnings. - Put win32 equivalents of nanosleep and sleep into compat.h fixing sleep() for adl.c. - Restore compatibility with Jansson 1.3 and 2.0 (api.c required 2.1) - Modularized logging, support for priority based logging - Move CPU chipset specific optimization into device-cpu CGMiner Version 2.2.4 - February 11, 2012 - Fix double definition of A0 B0 to zeroA zeroB. - Retain cl program after successfully loading a binary image. May decrease failures to build kernels at startup. - Variable unused after this so remove setting it. - BFI INT patching is not necessarily true on binary loading of files and not true on ATI SDK2.6+. Report bitalign instead. - Various string fixes for reject reason. - Generalize --temp-cutoff and implement support for reading temperature from BitFORCE FPGAs - Change message from recovered to alive since it is used on startup as well as when a pool has recovered. - Start mining as soon as any pool is found active and rely on the watchpool thread to bring up other pools. - Delayed responses from testing pools that are down can hold up the watchdog thread from getting to its device testing code, leading to false detection of the GPU not checking in, and can substantially delay auto gpu/auto fan management leading to overheating. Move pool watching to its own thread. - Bugfix: BitFORCE index needs to be static to count correctly - Space out retrieval of extra work according to the number of mining threads. - Make shutdown more robust. 
Enable the input thread only after the other threads exist. Don't kill off the workio thread and use it to exit main() only if there is an unexpected problem. Use kill_work() for all anticipated shutdowns where possible. Remove unused thread entry. - Change poclbm version number. - One array is faster than 2 separate arrays so change to that in poclbm kernel. - Microoptimisations to poclbm kernel which increase throughput slightly. - Import diablominer kernel. Currently disabled as not working. - Import diapolo kernel. Currently disabled as not working. - Conflicting entries of cl_kernel may have been causing problems, and automatically chosen kernel type was not being passed on. Rename the enum to cl_kernels and store the chosen kernel in each clState. - Set cl_amd_media_ops with the BITALIGN flag and allow non-bitselect devices to build. - ALlow much longer filenames for kernels to load properly. - Allow different kernels to be used by different devices and fix the logic fail of overcorrecting on last commit with !strstr. - Fix kernel selection process and build error. - queue_phatk_kernel now uses CL_SET_VARG() for base-nonce(s), too - added OpenCL >= 1.1 detection code, in preparation of OpenCL 1.1 global offset parameter support - Use K array explicitly to make it clear what is being added. - Work items have a tendency to expire at exactly the same time and we don't queue extra items when there are plenty in the queue, regardless of age. Allow extra work items to be queued if adequate time has passed since we last requested work even if over the limit. - Discard work when failover-only is enabled and the work has come from a different pool. - Missing include to build on newer mingw32. - Move from the thread safe localtime_r to regular localtime which is the only one supported on newer pthread libraries on mingw32 to make it compile with the newer ming. Thread safety is of no importance where localtime is used in this code. - Define in_addr_t in windows if required - sys/wait.h not required in windows - Allow API to restrict access by IP address - Add pool switching to example miner.php - Display X-Reject-Reason, when provided - Remove the test for whether the device is on the highest profil level before raising the GPU speed as it is ineffectual and may prevent raising the GPU speed. - Remove unnecessary check for opt_debug one every invocation of applog at LOG_DEBUG level and place the check in applog(). CGMiner Version 2.2.3 - February 6, 2012 - Revert "Rewrite the convoluted get_work() function to be much simpler and roll work as much as possible with each new work item." This seems to cause a race on work in free_work(). Presumably other threads are still accessing the structure. CGMiner Version 2.2.2 - February 6, 2012 - Provide support for the submitold extension on a per-pool basis based on the value being detected in a longpoll. - Don't send a ping to a dynamic device if it's not enabled as that will just enable it for one pass and then disable it again. - Rewrite the convoluted get_work() function to be much simpler and roll work as much as possible with each new work item. - Roll as much work as possible from the work returned from a longpoll. - Rolling work on each loop through the mining thread serves no purpose. - Allow to stage more than necessary work items if we're just rolling work. - Replace divide_work with reuse_work function used twice. - Give rolled work a new ID to make sure there is no confusion in the hashtable lookups. - Remove now-defunct hash_div variables. 
- Remove unused get_dondata function. - Silence ADL warnings. - Silence unused parameter warnings. - Stagger the restart of every next thread per device to keep devices busy ahead of accessory threads per device. - Deprecate the --donation feature. Needlessly complex, questionable usefulness, depends on author's server and a central pool of some kind, and was not heavily adopted. - It's devices that report back now, not threads, update message. - Continue auto-management of fan and engine speeds even if a device is disabled for safety reasons. - No need to check we're highest performance level when throttling GPU engine speed. - Abstract out tests for whether work has come from a block that has been seen before and whether a string is from a previously seen block. - Probe but don't set the timeout to 15 seconds as some networks take a long time to timeout. - Remove most compiler warnings from api.c - Add last share's pool info in cgpu_info - Allow the OpenCL platform ID to be chosen with --gpu-platform. - Iterate over all platforms displaying their information and number of devices when --ndevs is called. - Deprecate main.c - Some networks can take a long time to resolve so go back to 60 second timeouts instead of 15. - Only enable curses on failure if curses is desired. - Fix warnings in bitforce.c - Bugfix: Need to open BitForce tty for read-write - Fix various build issues. - Modularize code: main.c -> device-cpu + device-gpu - Fix phatk kernel not working on non-bitalign capable devices (Nvidia, older ATI). - Update poclbm kernel for better performance on GCN and new SDKs with bitalign support when not BFI INT patching. Update phatk kernel to work properly for non BFI INT patched kernels, providing support for phatk to run on GCN and non-ATI cards. - Return last accepted share pool/time for devices - Display accepted share pool/time for CPUs - Bug intensity always shows GPU 0 - Update example web miner.php to use new API commands CGMiner Version 2.2.1 - January 30, 2012 NOTE - The GPU Device reordering in 2.2.0 by default was considered a bad idea so the original GPU ordering is used by default again unless reordering is explicitly requested. - Fix bitforce failing to build into cgminer. - Add missing options to write config function. - Add a --gpu-reorder option to only reorder devices according to PCI Bus ID when requested. - Fix for midstate support being broken on pools that supported no-midstate work by ensuring numbers are 32 bits in sha2.c - Set virtual GPUs to work when ADL is disabled or all mining will occur on GPU 0. - Add information about paused threads in the menu status. - Disable all but the first thread on GPUs in dynamic mode for better interactivity. - Set the latest network access time on share submission for --net-delay even if we're not delaying that submission for further network access. - Clear adl on exiting after probing values since it may attempt to overclock. - As share submission is usually staggered, and delays can be costly, submit shares without delay even when --net-delay is enabled. - Display GPU number and device name when ADL is successfully enabled on it. - Display GPU ordering remapping in verbose mode. - Don't fail in the case the number of ADL and OpenCL devices do not match, and do not attempt to reorder devices unless they match. Instead give a warning about - Display error codes should ADL not return ADL_OK in the more critical function calls. - Fix unused warning. 
- Fix compile warnings in api.c - Add extensive ADL based device info in debug mode. - Make --ndevs display verbose opencl information as well to make debugging version information easier. - Display information about the opencl platform with verbose enabled. - Explicitly check for nvidia in opencl platform strings as well. CGMiner Version 2.2.0 - January 29, 2012 NOTE: GPU Device order will change with this release with ATI GPUs as cgminer now can enumerate them according to their Bus ID which means the values should now correlate with their physical position on the motherboard. - Default to poclbm kernel on Tahiti (7970) since phatk does not work, even though performance is sub-standard so that at least it will mine successfully by defau - Retain cl program after every possible place we might build the program. - Update ADL SDK URL. - Fix potential overflow. - Map GPU devices to virtual devices in their true physical order based on BusNumber. - Change the warning that comes with failure to init cl on a device to be more generic and accurate. - Advertise longpoll support in X-Mining-Extensions - Detect dual GPU cards by iterating through all GPUs, finding ones without fanspeed and matching twins with fanspeed one bus ID apart. - Do not attempt to build the program that becomes the kernel twice. This could have been leading to failures on initialising cl. - Some opencl compilers have issues with no spaces after -D in the compiler options. - Allow intensity up to 14. - Use calloced stack memory for CompilerOptions to ensure sprintf writes to the beginning of the char. - Whitelist 79x0 cards to prefer no vectors as they perform better without. - Adjust fan speed gently while in the optimal range when temperature is drifting to minimise overshoot in either direction. - Detect dual GPU cards via the indirect information of - 1st card has a fan controller. 2nd card does not have a fan controller, cards share the same device name - Instead of using the BFI_INT patching hack on any device reporting cl_amd_media_ops, create a whitelist of devices that need it. This should enable GCN architec - Fixed API compiling issue on OS X - Add more explanation of JSON format and the 'save' command - Return an error if using ADL API commands when it's not available - Read off lpThermalControllerInfo from each ADL device. - Add ADL_Overdrive5_ThermalDevices_Enum interface. - Add API commands: config, switchpool, gpu settings, save - Implement socks4 proxy support. - Fix send() for JSON strings - Introduce a --net-delay option which guarantees at least 250ms between any networking requests to not overload slow routers. - Generalise locking init code. - Allow invalid values to be in the configuration file, just skipping over them provided the rest of the file is valid JSON. This will allow older configurat - Allow CPU mining explicitly enable only if other mining support is built in. - BitForce FPGA support - Configure out building and support of all CPU mining code unless --enable-cpumining is enabled. - Allow parsed values to be zero which will allow 0 values in the config file to work. - Advertise that we can make our own midstate, so the pool can skip generating it for us - Refactor the CPU scanhash_* functions to use a common API. Fixes bugs. - Don't consider a pool lagging if a request has only just been filed. This should decrease the false positives for "pool not providing work fast enough". - Invalidating work after longpoll made hash_pop return no work giving a false positive for dead pool. 
Rework hash_pop to retry while finds no staged work u - Remove TCP_NODELAY from curl options as many small packets may be contributing to network overload, when --net-delay is enabled. - Refactor miner_thread to be common code for any kind of device - Simplify submit_nonce loop and avoid potentially missing FOUND - 1 entry. Reported by Luke-Jr. - Micro-optimisation in sha256_sse2 code courtesy of Guido Ascioti guido.ascioti@gmail.com - Refactor to abstract device-specific code CGMiner Version 2.1.2 - January 6, 2012 - If api-description is specified, save it when writing the config file - Adjust utility width to be constant maximum as well. - Add percent signs to reject ratio outputs - Should the donation pool fail, don't make the fallover pool behave as though the primary pool is lagging. - Use an alternative pool should the donation getwork fail. CGMiner Version 2.1.1 - January 1, 2012 - Include API examples in distribution tarball. - Don't attempt to pthread_join when cancelling threads as they're already detached and doing so can lead to a segfault. - Give more generic message if slow pool at startup is the donation pool. - Continue to attempt restarting GPU threads if they're flagged dead at 1 min. intervals. - Don't attempt to restart sick flagged GPUs while they're still registering activity. - Make curl use fresh connections whenever there is any communication issue in case there are dead persistent connections preventing further comms from working. - Display pool in summary if only 1 pool. - Adjust column width of A/R/HW to be the maximum of any device and align them. CGMiner Version 2.1.0 - December 27, 2011 - Major infrastructure upgrade with RPC interface for controlling via sockets encoded with/without JSON courtesy of Andrew Smith. Added documentation for use of the API and sample code to use with it. - Updated linux-usb-cgminer document. - Rewrite of longpoll mechanism to choose the current pool wherever possible to use for the longpoll, or any pool that supports longpoll if the current one does not. - Display information about longpoll when the chosen server has changed. - Fix the bug where longpoll generated work may have been sent back to the wrong pool, causing rejects. - Fix a few race conditions on closing cgminer which caused some of the crashes on exit. - Only adjust gpu engine speed in autotune mode if the gpu is currently at the performance level of that being adjusted. - Various fixes for parsing/writing of configuration files. - Do not add blank lines for threads of unused CPUs. - Show which pool is unresponsive on startup. - Only show GPU management menu item if GPUs are in use. - Align most device columns in the curses display. CGMiner Version 2.0.8 - November 11, 2011 - Make longpoll do a mandatory flushing of all work even if the block hasn't changed, thus supporting longpoll initiated work change of any sort and merged mining. - Byteswap computed hash in hashtest so it can be correctly checked. This fixes the very rare possibility that a block solve on solo mining was missed. - Add x86_64 w64 mingw32 target - Allow a fixed speed difference between memory and GPU clock speed with --gpu-memdiff that will change memory speed when GPU speed is changed in autotune mode. - Don't load the default config if a config file is specified on the command line. - Don't build VIA on apple since -a auto bombs instead of gracefully ignoring VIA failing. - Build fix for dlopen/dlclose errors in glibc. 
CGMiner Version 2.0.7 - October 17, 2011 - Support work without midstate or hash1, which are deprecated in bitcoind 0.5+ - Go to kernel build should we fail to clCreateProgramWithBinary instead of failing on that device. This should fix the windows problems with devices not initialising. - Support new configuration file format courtesy of Chris Savery which can write the config file from the menu and will load it on startup. - Write unix configuration to .cgminer/cgminer.conf by default and prompt to overwrite if given a filename from the menu that exists. CGMiner Version 2.0.6 - October 9, 2011 - Must initialise the donorpool mutex or it fails on windows. - Don't make donation work interfere with block change detection allowing donation to work regardless of the block chain we're mining on. - Expire shares as stale with a separate timeout from the scantime, defaulting to 120 seconds. - Retry pools after a delay of 15 seconds if none can be contacted on startup unless a key is pressed. - Don't try to build adl features without having adl. - Properly check shares against target difficulty - This will no longer show shares when solo mining at all unless they're considered to be a block solve. - Add altivec 4 way (cpu mining) support courtesy of Gilles Risch. - Try to use SSL if the server supports it. - Display the total solved blocks on exit (LOL if you're lucky). - Use ADL activity report to tell us if a sick GPU is still busy suggesting it is hard hung and do not attempt to restart it. CGMiner Version 2.0.5 - September 27, 2011 - Intensity can now be set to dynamic or static values per-device. - New donation feature --donation sends a proportion of shares to author's account of choice, but is disabled by default! - The hash being displayed and block detection has been fixed. - Devices not being mined on will not attempt to be ADL managed. - Intensity is now displayed per GPU device. - Make longpoll attempt to restart as often as opt_retries specifies. - We weren't rolling work as often as we could. - Correct some memory management issues. - Build fixes. - Don't mess with GPUs if we don't have them. CGMiner Version 2.0.4 - September 23, 2011 - Confused Longpoll messages should be finally fixed with cgminer knowing for sure who found the new block and possibly avoiding a rare crash. - Display now shows the actual hash and will say BLOCK! if a block is deemed solved. - Extra spaces, which would double space lines on small terminals, have been removed. - Fan speed change is now damped if it is already heading in the correct direction to minimise overshoot. - Building without opencl libraries is fixed. - GPUs are autoselected if there is only one when in the GPU management menu. - GPU menu is refreshed instead of returning to status after a GPU change. CGMiner Version 2.0.3 - September 17, 2011 - Various modes of failure to set fanspeeds and adl values have been addressed and auto-fan should work now on most hardware, and possibly other values which previously would not have worked. - Fixed a crash that can occur on switching pools due to longpoll thread races. - Use ATISTREAMSDKROOT if available at build time. - Fanspeed management is returned to the driver default on exit instead of whatever it was when cgminer was started. - Logging of events deemed WARNING or ERR now will display even during periods where menu input is being awaited on. CGMiner Version 2.0.2 - September 11, 2011 - Exit cleanly if we abort before various threads are set up or if they no longer exist. 
- Fix a rare crash in HASH_DEL due to using different mutexes to protect the data. - Flag devices that have never started and don't allow enabling of devices without restarting them. - Only force the adapter speed to high if we've flagged this device as being managed. - Flag any devices with autofan or autogpu as being managed. - Use a re-entrant value to store what fanspeed we're trying to set in case the card doesn't support small changes. Force it to a multiple of 10% if it fails on trying to speed up the fan. - Do not bother resetting values to old ones if changes to GPU parameters report failure, instead returning a failure code only if the return value from get() differs. - Remove redundant check. - Only display supported values from fanspeed on change settings. - Missing bracket from output. - Display fan percentage on devices that only support reporting percent and not RPM. - Properly substitute DLOPEN flags to build with ADL support when -ldl is needed and not when opencl is not found. CGMiner Version 2.0.1 - September 9, 2011 - Fix building on 32bit glibc with dlopen with -lpthread and -ldl - ByteReverse is not used and the bswap opcode breaks big endian builds. Remove it. - Ignore whether the display is active or not since only display enabled devices work this way, and we skip over repeat entries anwyay. - Only reset values on exiting if we've ever modified them. - Flag adl as active if any card is successfully activated. - Add a thermal cutoff option as well and set it to 95 degrees by default. - Change the fan speed by only 5% if it's over the target temperature but less than the hysteresis value to minimise overshoot down in temperature. - Add a --no-adl option to disable ADL monitoring and GPU settings. - Only show longpoll received delayed message at verbose level. - Allow temperatures greater than 100 degrees. - We should be passing a float for the remainder of the vddc values. - Implement accepting a range of engine speeds as well to allow a lower limit to be specified on the command line. - Allow per-device fan ranges to be set and use them in auto-fan mode. - Display which GPU has overheated in warning message. - Allow temperature targets to be set on a per-card basis on the command line. - Display fan range in autofan status. - Setting the hysteresis is unlikely to be useful on the fly and doesn't belong in the per-gpu submenu. - With many cards, the GPU summaries can be quite long so use a terse output line when showing them all. - Use a terser device status line to show fan RPM as well when available. - Define max gpudevices in one macro. - Allow adapterid 0 cards to enumerate as a device as they will be non-AMD cards, and enable ADL on any AMD card. - Do away with the increasingly confusing and irrelevant total queued and efficiency measures per device. - Only display values in the log if they're supported and standardise device log line printing. CGMiner Version 2.0.0 - September 6, 2011 Major feature upgrade - GPU monitoring, (over)clocking and fan control for ATI GPUs. New command line switches: --auto-fan- Automatically adjust all GPU fan speeds to maintain a target temperature --auto-gpu- Automatically adjust all GPU engine clock speeds to maintain a target temperature --gpu-engine Set the GPU engine (over)clock in Mhz - one value for all or separate by commas for per card. --gpu-fan Set the GPU fan percentage - one value for all or separate by commas for per card. 
--gpu-memclock Set the GPU memory (over)clock in Mhz - one value for all or separate by commas for per card. --gpu-powertune Set the GPU powertune percentage - one value for all or separate by commas for per card. --gpu-vddc Set the GPU voltage in Volts - one value for all or separate by commas for per card. --temp-hysteresis Set how much the temperature can fluctuate outside limits when automanaging speeds (default: 3) --temp-overheat Set the overheat temperature when automatically managing fan and GPU speeds (default: 85) --temp-target Set the target temperature when automatically managing fan and GPU speeds (default: 75) - Implement ATI ADL support for GPU parameter monitoring now and setting later (temp, fan, clocks etc.). - Check for the presence of the ADL header files in ADL_SDK. - Import adl_functions.h from amd overdrive ctrl. - Implement a setup function that tries to detect GPUs that support the ADL and link in the parameters into the gpus struct. - Put a summary of monitoring information from the GPU menu. - Implement changing memory speed and voltage on the fly. - Implement fan speed setting. - Minor corrections to set fan speed by percentage. - Make sure to read off the value in RPM only. - Implement auto fanspeed adjustment to maintain a target temperature and fanspeed below 85%, with an overheat check that will speed the fan up to 100%. - Add an --auto-fan command line option to allow all GPUs to have autofan enabled from startup. - Add a gpu autotune option which adjusts GPU speed to maintain a target temperature within the bounds of the default GPU speed and any overclocking set. - Avoid a dereference if the longpoll thread doesn't exist. - Clean up by setting performance profiles and fan settings to startup levels on exit. - Add a small amount of hysteresis before lowering clock speed. - Allow target, overheat and hysteresis temperatures to be set from command line. - Combine all stats collating into one function to avoid repeating function calls on each variable. - Add gpu statistics to debugging output via the watchdog thread. - Implement menus to change temperature limits. - Implement setting the GPU engine clock speed of all devices or each device as a comma separated value. - Implement setting the GPU memory clock speed of all devices or each device as a comma separated value. - Implement setting the GPU voltage of all devices or each device as a comma separated value. - Implement setting the GPU fan speed of all devices or each device as a comma separated value. - Add support for monitoring powertune setting. - Implement changing of powertune value from the GPU change settings menu. - Get the value of powertune in get_stats. - Implement setting the GPU powertune value of all devices or each device as a comma separated value. - Remove the safety checks in speed setting since confirmation is done first in the menu, then show the new current values after a short pause. - Force the speed to high on startup and restore it to whatever the setting was on exit. - Add temperature to standard output where possible and use more compact output. - Move and print at the same time in curses to avoid random trampling display errors. - Update the status window only from the watchdog thread, do not rewrite the top status messages and only refresh once all the status window is complete, clearing the window each time to avoid corruption. - Set a safe starting fan speed if we're automanaging the speeds. - Provide locking around all adl calls to prevent races. 
- Lower profile settings cannot be higher than higher profile ones so link any drops in settings. - Add new needed text files to distribution. - Queue requests ignoring the number of staged clones since they get discarded very easily leading to false positives for pool not providing work fast enough. - Include libgen.h in opt.c to fix win32 compilation warnings. - Fix compilation warning on win32. - Add the directory name from the arguments cgminer was called from as well to allow it running from a relative pathname. - Add a --disable-adl option to configure and only enable it if opencl support exists. - Retry before returning a failure to get upstream work as a failure to avoid false positives for pool dead. - Retry also if the decoding of work fails. - Use the presence of X-Roll-Ntime in the header as a bool for exists unless N is found in the response. CGMiner Version 1.6.2 - September 2, 2011 - Add --failover-only option to not leak work to backup pools when the primary pool is lagging. - Change recommendation to intensity 9 for dedicated miners. - Fix the bouncing short term value by allowing it to change dynamically when the latest value is very different from the rolling value, but damp the change when it gets close. - Use the curses_lock to protect the curses_active variable and test it under lock. - Go back to requesting work 2/3 of the way through the current scantime with CPU mining as reports of mining threads running out of work have occurred with only 5 seconds to retrieve work. - Add start and stop time scheduling for regular time of day running or once off start/stop options. - Print summary on quit modes. - Put some sanity checks on the times that can be input. - Give a verbose message when no active pools are found and pause before exiting. - Add verbose message when a GPU fails to initialise, and disable the correct GPU. - Cryptopp asm32 was not correctly updated to the incremental nonce code so the hash counter was bogus. - Get rid of poorly executed curl check. - If curl does not have sockopts, do not try to compile the json_rpc_call_sockopt_cb function, making it possible to build against older curl libraries. - Most people expect /usr/local when an unspecified prefix is used so change to that. - Rename localgen occasions to getwork fail occasions since localgen is unrelated now. CGMiner Version 1.6.1 - August 29, 2011 - Copy cgminer path, not cat it. - Switching between redrawing windows does not fix the crash with old libncurses, so redraw both windows, but only when the window size hasn't changed. - Reinstate minimum 1 extra in queue to make it extremely unlikely to ever have 0 staged work items and any idle time. - Return -1 if no input is detected from the menu to prevent it being interpreted as a 0. - Make pthread, libcurl and libcurses library checks mandatory or fail. - Add a --disable-opencl configure option to make it possible to override detection of opencl and build without GPU mining support. - Confusion over the variable name for number of devices was passing a bogus value which likely was causing the zero sized binary issue. - cgminer no longer supports default url user and pass so remove them. - Don't show value of intensity since it's dynamic by default. - Add options to explicitly enable CPU mining or disable GPU mining. - Convert the opt queue into a minimum number of work items to have queued instead of an extra number to decrease risk of getting idle devices without increasing risk of higher rejects. - Statify tv_sort. 
- Check for SSE2 before trying to build 32 bit SSE2 assembly version. Prevents build failure when yasm is installed but -msse2 is not specified. - Add some defines to configure.ac to enable exporting of values and packaging, and clean up output. - Give convenient summary at end of ./configure. - Display version information and add --version command line option, and make sure we flush stdout. - Enable curses after the mining threads are set up so that failure messages won't be lost in the curses interface. - Disable curses after inputting a pool if we requested no curses interface. - Add an option to break out after successfully mining a number of accepted shares. - Exit with a failed return code if we did not reach opt_shares. - The cpu mining work data can get modified before we copy it if we submit it async, and the sync submission is not truly sync anyway, so just submit it sync. CGMiner Version 1.6.0 - August 26, 2011 - Make restarting of GPUs optional for systems that hang on any attempt to restart them. Fix DEAD status by comparing it to last live time rather than last attempted restart time since that happens every minute. - Move staged threads to hashes so we can sort them by time. - Create a hash list of all the blocks created and search them to detect when a new block has definitely appeared, using that information to detect stale work and discard it. - Update configure.ac for newer autoconf tools. - Use the new hashes directly for counts instead of the fragile counters currently in use. - Update to latest sse2 code from cpuminer-ng. - Allow LP to reset block detect and block detect lp flags to know who really came first. - Get start times just before mining begins to not have very slow rise in average. - Add message about needing one server. - We can queue all the necessary work without hitting frequent stales now with the time and string stale protection active all the time. This prevents a pool being falsely labelled as not providing work fast enough. - Include uthash.h in distro. - Implement SSE2 32 bit assembly algorithm as well. - Fail gracefully if unable to open the opencl files. - Make cgminer look in the install directory for the .cl files making make install work correctly. - Allow a custom kernel path to be entered on the command line. - Bump threshhold for lag up to maximum queued but no staged work. - Remove fragile source patching for bitalign, vectors et. al and simply pass it with the compiler options. - Actually check the value returned for the x-roll-ntime extension to make sure it isn't saying N. - Prevent segfault on exit for when accessory threads don't exist. - Disable curl debugging with opt protocol since it spews to stderr. CGMiner Version 1.5.8 - August 23, 2011 - Minimise how much more work can be given in cpu mining threads each interval. - Make the fail-pause progressively longer each time it fails until the network recovers. - Only display the lagging message if we've requested the work earlier. - Clean up the pool switching to not be dependent on whether the work can roll or not by setting a lagging flag and then the idle flag. - Only use one thread to determine if a GPU is sick or well, and make sure to reset the sick restart attempt time. - The worksize was unintentionally changed back to 4k by mistake, this caused a slowdown. CGMiner Version 1.5.7 - August 22, 2011 - Fix a crash with --algo auto - Test at appropriate target difficulty now. 
- Add per-device statics log output with --per-device-stats - Fix breakage that occurs when 1 or 4 vectors are chosen on new phatk. - Make rolltime report debug level only now since we check it every work item. - Add the ability to enable/disable per-device stats on the fly and match logging on/off. - Explicitly tell the compiler to retain the program to minimise the chance of the zero sized binary errors. - Add one more instruction to avoid one branch point in the common path in the cl return code. Although this adds more ALUs overall and more branch points, the common path code has the same number of ALUs and one less jmp, jmps being more expensive. - Explicitly link in ws2_32 on the windows build and update README file on how to compile successfully on windows. - Release cl resources should the gpu mining thread abort. - Attempt to restart a GPU once every minute while it's sick. - Don't kill off the reinit thread if it fails to init a GPU but returns safely. - Only declare a GPU dead if there's been no sign of activity from the reinit thread for 10 mins. - Never automatically disable any pools but just specify them as idle if they're unresponsive at startup. - Use any longpoll available, and don't disable it if switching to a server that doesn't have it. This allows you to mine solo, yet use the longpoll from a pool even if the pool is the backup server. - Display which longpoll failed and don't free the ram for lp_url since it belongs to the pool hdr path. - Make the tcp setsockopts unique to linux in the hope it allows freebsd et. al to compile. CGMiner Version 1.5.6 - August 17, 2011 - New phatk and poclbm kernels. Updated phatk to be in sync with latest 2.2 courtesy of phateus. Custom modified to work best with cgminer. - Updated output buffer code to use a smaller buffer with the kernels. - Clean up the longpoll management to ensure the right paths go to the right pool and display whether we're connected to LP or not in the status line. CGMiner Version 1.5.5 - August 16, 2011 - Rework entirely the GPU restart code. Strike a balance between code that re-initialises the GPU entirely so that soft hangs in the code are properly managed, but if a GPU is completely hung, the thread restart code fails gracefully, so that it does not take out any other code or devices. This will allow cgminer to keep restarting GPUs that can be restarted, but continue mining even if one or more GPUs hangs which would normally require a reboot. - Add --submit-stale option which submits all shares, regardless of whether they would normally be considered stale. - Keep options in alphabetical order. - Probe for slightly longer for when network conditions are lagging. - Only display the CPU algo when we're CPU mining. - As we have keepalives now, blaming network flakiness on timeouts appears to have been wrong. Set a timeout for longpoll to 1 hour, and most other network connectivity to 1 minute. - Simplify output code and remove HW errors from CPU stats. - Simplify code and tidy output. - Only show cpu algo in summary if cpu mining. - Log summary at the end as per any other output. - Flush output. - Add a linux-usb-cgminer guide courtesy of Kano. CGMiner Version 1.5.4 - August 14, 2011 - Add new option: --monitor Option lets user specify a command that will get forked by cgminer on startup. cgminer's stderr output subsequently gets piped directly to this command. - Allocate work from one function to be able to initialise variables added later. - Add missing fflush(stdout) for --ndevs and conclusion summary. 
- Preinitialise the devices only once on startup. - Move the non cl_ variables into the cgpu info struct to allow creating a new cl state on reinit, preserving known GPU variables. - Create a new context from scratch in initCQ in case something was corrupted to maximise our chance of succesfully creating a new worker thread. Hopefully this makes thread restart on GPU failure more reliable, without hanging everything in the case of a completely wedged GPU. - Display last initialised time in gpu management info, to know if a GPU has been re-initialised. - When pinging a sick cpu, flush finish and then ping it in a separate thread in the hope it recovers without needing a restart, but without blocking code elsewhere. - Only consider a pool lagging if we actually need the work and we have none staged despite queue requests stacking up. This decreases significantly the amount of work that leaks to the backup pools. - The can_roll function fails inappropriately in stale_work. - Only put the message that a pool is down if not pinging it every minute. This prevents cgminer from saying pool down at 1 minute intervals unless in debug mode. - Free all work in one place allowing us to perform actions on it in the future. - Remove the extra shift in the output code which was of dubious benefit. In fact in cgminer's implementation, removing this caused a miniscule speedup. - Test each work item to see if it can be rolled instead of per-pool and roll whenever possible, adhering to the 60 second timeout. This makes the period after a longpoll have smaller dips in throughput, as well as requiring less getworks overall thus increasing efficiency. - Stick to rolling only work from the current pool unless we're in load balance mode or lagging to avoid aggressive rolling imitating load balancing. - If a work item has had any mining done on it, don't consider it discarded work. CGMiner Version 1.5.3 - July 30, 2011 - Significant work went into attempting to make the thread restart code robust to identify sick threads, tag them SICK after 1 minute, then DEAD after 5 minutes of inactivity and try to restart them. Instead of re-initialising the GPU completely, only a new cl context is created to avoid hanging the rest of the GPUs should the dead GPU be hung irrevocably. - Use correct application name in syslog. - Get rid of extra line feeds. - Use pkg-config to check for libcurl version - Implement per-thread getwork count with proper accounting to not over-account queued items when local work replaces it. - Create a command queue from the program created from source which allows us to flush the command queue in the hope it will not generate a zero sized binary any more. - Be more willing to get work from the backup pools if the work is simply being queued faster than it is being retrieved. CGMiner Version 1.5.2 - July 28, 2011 - Restarting a hung GPU can hang the rest of the GPUs so just declare it dead and provide the information in the status. - The work length in the miner thread gets smaller but doesn't get bigger if it's under 1 second. This could end up leading to CPU under-utilisation and lower and lower hash rates. Fix it by increasing work length if it drops under 1 second. - Make the "quiet" mode still update the status and display errors, and add a new --real-quiet option which disables all output and can be set once while running. - Update utility and efficiency figures when displaying them. - Some Intel HD graphics support the opencl commands but return errors since they don't support opencl. 
Don't fail with them, just provide a warning and disable GPU mining. - Add http:// if it's not explicitly set for URL entries. - Log to the output file at any time with warnings and errors, instead of just when verbose mode is on. - Display the correct current hash as per blockexplorer, truncated to 16 characters, with just the time. CGMiner Version 1.5.1 - July 27, 2011 - Two redraws in a row cause a crash in old libncurses so just do one redraw using the main window. - Don't adjust hash_div only up for GPUs. Disable hash_div adjustment for GPUs. - Only free the thread structures if the thread still exists. - Update both windows separately, but not at the same time to prevent the double refresh crash that old libncurses has. Do the window resize check only when about to redraw the log window to minimise ncurses cpu usage. - Abstract out the decay time function and use it to make hash_div a rolling average so it doesn't change too abruptly and divide work in chunks large enough to guarantee they won't overlap. - Sanity check to prove locking. - Don't take more than one lock at a time. - Make threads report out when they're queueing a request and report if they've failed. - Make cpu mining work submission asynchronous as well. - Properly detect stale work based on time from staging and discard instead of handing on, but be more lax about how long work can be divided for up to the scantime. - Do away with queueing work separately at the start and let each thread grab its own work as soon as it's ready. - Don't put an extra work item in the queue as each new device thread will do so itself. - Make sure to decrease queued count if we discard the work. - Attribute split work as local work generation. - If work has been cloned it is already at the head of the list and when being reinserted into the queue it should be placed back at the head of the list. - Dividing work is like the work is never removed at all so treat it as such. However the queued bool needs to be reset to ensure we *can* request more work even if we didn't initially. - Make the display options clearer. - Add debugging output to tq_push calls. - Add debugging output to all tq_pop calls. CGMiner Version 1.5.0 - July 26, 2011 - Increase efficiency of slow mining threads such as CPU miners dramatically. Do this by detecting which threads cannot complete searching a work item within the scantime and then divide up a work item into multiple smaller work items. Detect the age of the work items and if they've been cloned before to prevent doing the same work over. If the work is too old to be divided, then see if it can be time rolled and do that to generate work. This dramatically decreases the number of queued work items from a pool leading to higher overall efficiency (but the same hashrate and share submission rate). - Don't request work too early for CPUs as CPUs will scan for the full opt_scantime anyway. - Simplify gpu management enable/disable/restart code. - Implement much more accurate rolling statistics per thread and per gpu and improve accuracy of rolling displayed values. - Make the rolling log-second average more accurate. - Add a menu to manage GPUs on the fly allowing you to enable/disable GPUs or try restarting them. - Keep track of which GPUs are alive versus enabled. - Start threads for devices that are even disabled, but don't allow them to start working. - The last pool is when we are low in total_pools, not active_pools. 
- Make the thread restart do a pthread_join after disabling the device, only re-enabling it if we succeed in restarting the thread. Do this from a separate thread so as to not block any other code. This will allow cgminer to continue even if one GPU hangs. - Try to do every curses manipulation under the curses lock. - Only use the sockoptfunction if the version of curl is recent enough. CGMiner Version 1.4.1 - July 24, 2011 - Do away with GET for dealing with longpoll forever. POST is the one that works everywhere, not the other way around. - Detect when the primary pool is lagging and start queueing requests on backup pools if possible before needing to roll work. - Load balancing puts more into the current pool if there are disabled pools. Fix. - Disable a GPU device should the thread fail to init. - Out of order command queue may fail on osx. Try without if it fails. - Fix possible dereference on blank inputs during input_pool. - Defines missing would segfault on --help when no sse mining is built in. - Revert "Free up resources/stale compilers." - didn't help. - Only try to print the status of active devices or it would crash. - Some hardware might benefit from the less OPS so there's no harm in leaving kernel changes that do that apart from readability of the code. CGMiner Version 1.4.0 - July 23, 2011 - Feature upgrade: Add keyboard input during runtime to allow modification of and viewing of numerous settings such as adding/removing pools, changing multipool management strategy, switching pools, changing intensity, verbosity, etc. with a simple keypress menu system. - Free up resources/stale compilers. - Kernels are safely flushed in a way that allows out of order execution to work. - Sometimes the cl compiler generates zero sized binaries and only a reboot seems to fix it. - Don't try to stop/cancel threads that don't exist. - Only set option to show devices and exit if built with opencl support. - Enable curses earlier and exit with message in main for messages to not be lost in curses windows. - Make it possible to enter server credentials with curses input if none are specified on the command line. - Abstract out a curses input function and separate input pool function to allow for live adding of pools later. - Remove the nil arguments check to allow starting without parameters. - Disable/enable echo & cbreak modes. - Add a thread that takes keyboard input and allow for quit, silent, debug, verbose, normal, rpc protocol debugging and clear screen options. - Add pool option to input and display current pool status, pending code to allow live changes. - Add a bool for explicit enabling/disabling of pools. - Make input pool capable of bringing up pools while running. - Do one last check of the work before submitting it. - Implement the ability to live add, enable, disable, and switch to pools. - Only internally test for block changes when the work matches the current pool to prevent interleaved block change timing on multipools. - Display current pool management strategy to enable changing it on the fly. - The longpoll blanking of the current_block data may not be happening before the work is converted and appears to be a detected block change. Blank the current block be - Make --no-longpoll work again. - Abstract out active pools count. - Allow the pool strategy to be modified on the fly. - Display pool information on the fly as well. - Add a menu and separate out display options.
- Clean up the messy way the staging thread communicates with the longpoll thread to determine who found the block first. - Make the input windows update immediately instead of needing a refresh. - Allow log interval to be set in the menu. - Allow scan settings to be modified at runtime. - Abstract out the longpoll start and explicitly restart it on pool change. - Make it possible to enable/disable longpoll. - Set priority correctly on multipools. Display priority and alive/dead information in display_pools. - Implement pool removal. - Limit rolltime work generation to 10 iterations only. - Decrease testing log to info level. - Extra refresh not required. - With huge variation in GPU performance, allow intensity to go from -10 to +10. - Tell getwork how much of a work item we're likely to complete for future splitting up of work. - Remove the mandatory work requirement at startup by testing for invalid work being passed which allows for work to be queued immediately. This also removes the requirem - Make sure intensity is carried over to thread count and is at least the minimum necessary to work. - Unlocking error on retry. Locking unnecessary anyway so remove it. - Clear log window from consistent place. No need for locking since logging is disabled during input. - Cannot print the status of threads that don't exist so just queue enough work for the number of mining threads to prevent crash with -Q N. - Update phatk kernel to one with new parameters for slightly less overhead again. Make the queue kernel parameters call a function pointer to select phatk or poclbm. - Make it possible to select the choice of kernel on the command line. - Simplify the output part of the kernel. There's no demonstrable advantage from more complexity. - Merge pull request #18 from ycros/cgminer - No need to make leaveok changes win32 only. - Build support in for all SSE if possible and only set the default according to machine capabilities. - Win32 threading and longpoll keepalive fixes. - Win32: Fix for mangled output on the terminal on exit. CGMiner Version 1.3.1 - July 20, 2011 - Feature upgrade; Multiple strategies for failover. Choose from default which now falls back to a priority order from 1st to last, round robin which only changes pools when one is idle, rotate which changes pools at user-defined intervals, and load-balance which spreads the work evenly amongst all pools. - Implement pool rotation strategy. - Implement load balancing algorithm by rotating requests to each pool. - Timeout on failed discarding of staged requests. - Implement proper flagging of idle pools, test them with the watchdog thread, and failover correctly. - Move pool active test to own function. - Allow multiple strategies to be set for multipool management. - Track pool number. - Don't waste the work items queued on testing the pools at startup. - Reinstate the mining thread watchdog restart. - Add a getpoll bool into the thread information and don't restart threads stuck waiting on work. - Rename the idlenet bool for the pool for later use. - Allow the user/pass userpass urls to be input in any order. - When json rpc errors occur they occur in spits and starts, so trying to limit them with the comms error bool doesn't stop a flood of them appearing. - Reset the queued count to allow more work to be queued for the new pool on pool switch. CGMiner Version 1.3.0 - July 19, 2011 - Massive infrastructure update to support pool failover. 
- Accept multiple parameters for url, user and pass and set up structures of pool data accordingly. - Probe each pool for what it supports. - Implement per pool feature support according to rolltime support as advertised by server. - Do switching automatically based on a 300 second timeout of locally generated work or 60 seconds of no response from a server that doesn't support rolltime. - Implement longpoll server switching. - Keep per-pool data and display accordingly. - Make sure cgminer knows how long the pool has actually been out for before deeming it a prolonged outage. - Fix bug with ever increasing staged work in 1.2.8 that eventually caused infinite rejects. - Make warning about empty http requests not show by default since many servers do this regularly. CGMiner Version 1.2.8 - July 18, 2011 - More OSX build fixes. - Add an sse4 algorithm to CPU mining. - Fix CPU mining with other algorithms not working. - Rename the poclbm file to ensure a new binary is built since. - We now are guaranteed to have one fresh work item after a block change and we should only discard staged requests. - Don't waste the work we retrieve from a longpoll. - Provide a control lock around global bools to avoid racing on them. - Iterating over 1026 nonces when confirming data from the GPU is old code and unnecessary and can lead to repeats/stales. - The poclbm kernel needs to be updated to work with the change to 4k sized output buffers. - longpoll seems to work either way with post or get but some servers prefer get so change to httpget. CGMiner Version 1.2.7 - July 16, 2011 - Show last 8 characters of share submitted in log. - Display URL connected to and user logged in as in status. - Display current block and when it was started in the status line. - Only pthread_join the mining threads if they exist as determined by pthread_cancel and don't fail on pthread_cancel. - Create a unique work queue for all getworks instead of binding it to thread 0 to avoid any conflict over thread 0's queue. - Clean up the code to make it clear it's watchdog thread being messaged to restart the threads. - Check the current block description hasn't been blanked pending the real new current block data. - Re-enable signal handlers once the signal has been received to make it possible to kill cgminer if it fails to shut down. - Disable restarting of CPU mining threads pending further investigation. - Update longpoll messages. - Add new block data to status line. - Fix opencl tests for osx. - Only do local generation of work if the work item is not stale itself. - Check for stale work within the mining threads and grab new work if positive. - Test for idle network conditions and prevent threads from being restarted by the watchdog thread under those circumstances. - Make sure that local work generation does not continue indefinitely by stopping it after 10 minutes. - Tweak the kernel to have a shorter path using a 4k buffer and a mask on the nonce value instead of a compare and loop for a shorter code path. - Allow queue of zero and make that default again now that we can track how work is being queued versus staged. This can decrease reject rates. - Queue precisely the number of mining threads as longpoll_staged after a new block to not generate local work. CGMiner Version 1.2.6 - July 15, 2011 - Put a current system status line beneath the total work status line - Fix a counting error that would prevent cgminer from correctly detecting situations where getwork was failing - this would cause stalls sometimes unrecoverably. 
- Limit the maximum number of requests that can be put into the queue which otherwise could get arbitrarily long during a network outage. - Only count getworks that are real queue requests. CGMiner Version 1.2.5 - July 15, 2011 - Conflicting -n options corrected - Setting an intensity with -I disables dynamic intensity setting - Removed option to manually disable dynamic intensity - Improve display output - Implement signal handler and attempt to clean up properly on exit - Only restart threads that are not stuck waiting on mandatory getworks - Compatibility changes courtesy of Ycros to build on mingw32 and osx - Explicitly grab first work item to prevent false positive hardware errors due to working on uninitialised work structs - Add option for non curses --text-only output - Ensure we connect at least once successfully before continuing to retry to connect in case url/login parameters were wrong - Print an executive summary when cgminer is terminated - Make sure to refresh the status window CGMiner Versions -> 1.2.4 - Con Kolivas - July 2011. New maintainership of code under cgminer name. - Massive rewrite to incorporate GPU mining. - Incorporate original oclminer c code. - Rewrite gpu mining code to efficient work loops. - Implement per-card detection and settings. - Implement vector code. - Implement bfi int patching. - Import poclbm and phatk ocl kernels and use according to hardware type. - Implement customised optimised versions of opencl kernels. - Implement binary kernel generation and loading. - Implement preemptive asynchronous threaded work gathering and pushing. - Implement variable length extra work queues. - Optimise workloads to be efficient miners instead of getting lots of extra work. - Implement total hash throughput counters, per-card accepted, rejected and hw error count. - Staging and watchdog threads to prevent fallover. - Stale and reject share guarding. - Autodetection of new blocks without longpoll. - Dynamic setting of intensity to maintain desktop interactivity. - Curses interface with generous statistics and information. - Local generation of work (xroll ntime) when detecting poor network connectivity. cpuminer Version 1.0.2 - Linux x86_64 optimisations - Con Kolivas - Optimise for x86_64 by default by using sse2_64 algo - Detects CPUs and sets number of threads accordingly - Uses CPU affinity for each thread where appropriate - Sets scheduling policy to lowest possible - Minor performance tweaks cpuminer Version 1.0.1 - May 14, 2011 - OSX support cpuminer Version 1.0 - May 9, 2011 - jansson 2.0 compatibility - correct off-by-one in date (month) display output - fix platform detection - improve yasm configure bits - support full URL, in X-Long-Polling header cpuminer Version 0.8.1 - March 22, 2011 - Make --user, --pass actually work - Add User-Agent HTTP header to requests, so that server operators may more easily identify the miner client. - Fix minor bug in example JSON config file cpuminer Version 0.8 - March 21, 2011 - Support long polling: http://deepbit.net/longpolling.php - Adjust max workload based on scantime (default 5 seconds, or 60 seconds for longpoll) - Standardize program output, and support syslog on Unix platforms - Suport --user/--pass options (and "user" and "pass" in config file), as an alternative to the current --userpass cpuminer Version 0.7.2 - March 14, 2011 - Add port of ufasoft's sse2 assembly implementation (Linux only) This is a substantial speed improvement on Intel CPUs. - Move all JSON-RPC I/O to separate thread. 
This reduces the number of HTTP connections from one-per-thread to one, reducing resource usage on upstream bitcoind / pool server. cpuminer Version 0.7.1 - March 2, 2011 - Add support for JSON-format configuration file. See example file example-cfg.json. Any long argument on the command line may be stored in the config file. - Timestamp each solution found - Improve sha256_4way performance. NOTE: This optimization makes the 'hash' debug-print output for sha256_way incorrect. - Use __builtin_expect() intrinsic as compiler micro-optimization - Build on Intel compiler - HTTP library now follows HTTP redirects cpuminer Version 0.7 - February 12, 2011 - Re-use CURL object, thereby reuseing DNS cache and HTTP connections - Use bswap_32, if compiler intrinsic is not available - Disable full target validation (as opposed to simply H==0) for now cpuminer Version 0.6.1 - February 4, 2011 - Fully validate "hash < target", rather than simply stopping our scan if the high 32 bits are 00000000. - Add --retry-pause, to set length of pause time between failure retries - Display proof-of-work hash and target, if -D (debug mode) enabled - Fix max-nonce auto-adjustment to actually work. This means if your scan takes longer than 5 seconds (--scantime), the miner will slowly reduce the number of hashes you work on, before fetching a new work unit. cpuminer Version 0.6 - January 29, 2011 - Fetch new work unit, if scanhash takes longer than 5 seconds (--scantime) - BeeCee1's sha256 4way optimizations - lfm's byte swap optimization (improves via, cryptopp) - Fix non-working short options -q, -r cpuminer Version 0.5 - December 28, 2010 - Exit program, when all threads have exited - Improve JSON-RPC failure diagnostics and resilience - Add --quiet option, to disable hashmeter output. cpuminer Version 0.3.3 - December 27, 2010 - Critical fix for sha256_cryptopp 'cryptopp_asm' algo cpuminer Version 0.3.2 - December 23, 2010 - Critical fix for sha256_via cpuminer Version 0.3.1 - December 19, 2010 - Critical fix for sha256_via - Retry JSON-RPC failures (see --retry, under "minerd --help" output) cpuminer Version 0.3 - December 18, 2010 - Add crypto++ 32bit assembly implementation - show version upon 'minerd --help' - work around gcc 4.5.x bug that killed 4way performance cpuminer Version 0.2.2 - December 6, 2010 - VIA padlock implementation works now - Minor build and runtime fixes cpuminer Version 0.2.1 - November 29, 2010 - avoid buffer overflow when submitting solutions - add Crypto++ sha256 implementation (C only, ASM elided for now) - minor internal optimizations and cleanups cpuminer Version 0.2 - November 27, 2010 - Add script for building a Windows installer - improve hash performance (hashmeter) statistics - add tcatm 4way sha256 implementation - Add experimental VIA Padlock sha256 implementation cpuminer Version 0.1.2 - November 26, 2010 - many small cleanups and micro-optimizations - build win32 exe using mingw - RPC URL, username/password become command line arguments - remove unused OpenSSL dependency cpuminer Version 0.1.1 - November 24, 2010 - Do not build sha256_generic module separately from cpuminer. cpuminer Version 0.1 - November 24, 2010 - Initial release. bfgminer-bfgminer-3.10.0/README000066400000000000000000001154751226556647300160700ustar00rootroot00000000000000BFGMiner: St. 
Barbara's Faithfully Glorified Mining Initiative Naturally Exceeding Rivals or Basically a Freaking Good Miner This is a multi-threaded multi-pool ASIC, FPGA, GPU and CPU miner with dynamic clocking, monitoring, and fanspeed support for bitcoin. Do not use on multiple block chains at the same time! This code is provided entirely free of charge by the programmer in his spare time so donations would be greatly appreciated. Please consider donating to the address below. Luke-Jr 1QATWksNFGeUJCWBrN4g6hGM178Lovm7Wh DOWNLOADS: http://luke.dashjr.org/programs/bitcoin/files/bfgminer GIT TREE: https://github.com/luke-jr/bfgminer Bug reports: https://github.com/luke-jr/bfgminer/issues IRC Channel: irc://irc.freenode.net/eligius License: GPLv3. See COPYING for details. SEE ALSO README.ASIC, README.FPGA, README.GPU, README.RPC, AND README.scrypt FOR MORE INFORMATION ON EACH. --- EXECUTIVE SUMMARY ON USAGE: Single pool: bfgminer -o http://pool:port -u username -p password Multiple pools: bfgminer -o http://pool1:port -u pool1username -p pool1password -o http://pool2:port -u pool2username -p pool2password Single pool with a standard http proxy: bfgminer -o http://pool:port -x http://proxy:port -u username -p password Single pool with a socks5 proxy: bfgminer -o http://pool:port -x socks5://proxy:port -u username -p password The list of proxy types is: http: standard http 1.1 proxy socks4: socks4 proxy socks5: socks5 proxy socks4a: socks4a proxy socks5h: socks5 proxy using a hostname Proxy support requires cURL version 7.21.7 or newer. If you specify the --socks-proxy option to BFGMiner, it will be applied to all pools that don't specify their own proxy setting, as above. After saving configuration from the menu ([S],[W]) you do not need to give BFGMiner any arguments; it will load your configuration instead. Any configuration file may also contain a single "include" : "filename" to recursively include another configuration file. Writing the configuration will save all settings from all files to the output configuration file.
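For example, a minimal configuration file covering the above might look like this (a sketch only; the pool details and the "rig-specific.conf" filename are placeholders):

{
"pools" : [
	{
		"url" : "http://pool1:port",
		"user" : "pool1username",
		"pass" : "pool1password"
	},
	{
		"url" : "http://pool2:port",
		"user" : "pool2username",
		"pass" : "pool2password"
	}
],
"include" : "rig-specific.conf"
}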
--- BUILDING BFGMINER Everything you probably want, condensed: build-essential autoconf automake libtool pkg-config libcurl4-gnutls-dev libjansson-dev uthash-dev libncursesw5-dev libudev-dev libusb-1.0-0-dev libevent-dev libmicrohttpd-dev hidapi Dependencies: autoconf http://www.gnu.org/software/autoconf/ automake http://www.gnu.org/software/automake/ libtool http://www.gnu.org/software/libtool/ pkg-config http://www.freedesktop.org/wiki/Software/pkg-config ...or pkgconf https://github.com/pkgconf/pkgconf libcurl4-gnutls-dev http://curl.haxx.se/libcurl/ libjansson-dev 2.0+ http://www.digip.org/jansson/ uthash-dev 1.9.4+ http://troydhanson.github.io/uthash/ Optional Dependencies: Text-User-Interface (TUI): curses dev library; any one of: libncurses5-dev http://www.gnu.org/software/ncurses/ (Linux and Mac) libncursesw5-dev ^ same libpdcurses http://pdcurses.sourceforge.net/ (Linux/Mac/Windows) Multiple ASIC/FPGA autodetection: any one of: sysfs (built-in to most Linux kernels, just mount on /sys) libudev-dev http://www.freedesktop.org/software/systemd/libudev/ HashBuster Nano & NanoFury USB devices: hidapi https://github.com/signal11/hidapi getwork server for Block Erupter Blades: libmicrohttpd-dev 0.9.5+ http://www.gnu.org/software/libmicrohttpd/ Stratum proxy: libevent 2.0.3+ http://libevent.org/ HashBuster Micro, Klondike, X6500 and ZTEX FPGA boards: libusb-1.0-0-dev http://www.libusb.org/ Video card GPU mining (free): llvm 3.3+ http://llvm.org/ clang 3.3+ http://clang.llvm.org/ libclc http://libclc.llvm.org/ Mesa 9.2.0+ http://www.mesa3d.org/ ATi/AMD video card GPU mining (non-free): AMD APP SDK http://developer.amd.com/tools/heterogeneous-computing/amd-accelerated-parallel-processing-app-sdk/ CPU mining optimized assembly algorithms: yasm 1.0.1+ http://yasm.tortall.net/ BFGMiner specific configuration options: --disable-avalon Compile support for Avalon (default enabled) --enable-cpumining Build with cpu mining support(default disabled) --enable-opencl Compile support for OpenCL (default disabled) --disable-adl Build without ADL monitoring (default enabled) --disable-bitfury Compile support for Bitfury (default enabled) --enable-bfsb Compile support for BFSB (default disabled) --disable-bigpic Compile support for Big Picture Mining USB (default enabled) --disable-littlefury Compile support for LittleFury (default enabled) --disable-nanofury Compile support for NanoFury (default enabled) --disable-hashbuster Compile support for HashBuster Nano (default enabled) --disable-hashbuster2 Compile support for HashBuster Micro (default if libusb) --enable-metabank Compile support for Metabank (default disabled) --disable-bitforce Compile support for BitForce (default enabled) --disable-icarus Compile support for Icarus (default enabled) --disable-klondike Compile support for Klondike (default enabled) --enable-knc Compile support for KnC (default disabled) --disable-modminer Compile support for ModMiner (default enabled) --disable-x6500 Compile support for X6500 (default enabled) --disable-ztex Compile support for ZTEX (default if libusb) --enable-scrypt Compile support for scrypt mining (default disabled) --with-system-libblkmaker Use system libblkmaker rather than bundled one (default disabled) --with-udevrulesdir=DIR Install udev rules into this directory --without-sensors Build with libsensors monitoring (default enabled) --without-curses Compile support for curses TUI (default enabled) --without-libmicrohttpd Compile support for libmicrohttpd getwork server (default enabled) 
--without-libevent Compile support for libevent stratum server (default enabled) --without-libusb Compile using libusb (default enabled) --without-libudev Autodetect FPGAs using libudev (default enabled) Basic *nix build instructions: ./autogen.sh # only needed if building from git repo ./configure make No installation is necessary. You may run BFGMiner from the build directory directly. On Mac OS X, you can use Homebrew to install the dependency libraries. When you are ready to build BFGMiner, you may need to point the configure script at one or more pkg-config paths. For example: ./configure PKG_CONFIG_PATH=/usr/local/opt/curl/lib/pkgconfig:/usr/local/opt/jansson/lib/pkgconfig Native WIN32 build instructions: see windows-build.txt If you build BFGMiner from source, it is recommended that you run it from the build directory. On *nix, you will usually need to prepend your command with a path like this (if you are in the bfgminer directory already): ./bfgminer To install system wide run 'sudo make install' or 'make install' as root. You can then run from any terminal. --- Usage instructions: Run "bfgminer --help" to see options: Usage: bfgminer [-DdElmpPQqUsTouOchnV] Options for both config file and command line: --api-allow Allow API access (if enabled) only to the given list of [W:]IP[/Prefix] address[/subnets] This overrides --api-network and you must specify 127.0.0.1 if it is required W: in front of the IP address gives that address privileged access to all api commands --api-description Description placed in the API status header (default: BFGMiner version) --api-groups API one letter groups G:cmd:cmd[,P:cmd:*...] See README.RPC for usage --api-listen Listen for API requests (default: disabled) By default any command that does not just display data returns access denied See --api-allow to overcome this --api-mcast Enable API Multicast listener, default: disabled --api-mcast-addr API Multicast listen address (default: "224.0.0.75") --api-mcast-code Code expected in the API Multicast message, don't use '-' (default: "FTW") --api-mcast-port API Multicast listen port (default: 4028) --api-network Allow API (if enabled) to listen on/for any address (default: only 127.0.0.1) --api-port Port number of miner API (default: 4028) --balance Change multipool strategy from failover to even share balance --benchmark Run BFGMiner in benchmark mode - produces no shares --chroot-dir Chroot to a directory right after startup --cmd-idle Execute a command when a device is allowed to be idle (rest or wait) --cmd-sick Execute a command when a device is declared sick --cmd-dead Execute a command when a device is declared dead --coinbase-addr Set coinbase payout address for solo mining --coinbase-sig Set coinbase signature when possible --compact Use compact display without per device statistics --debug|-D Enable debug output --debuglog Enable debug logging --device|-d Enable only devices matching pattern (default: all) --disable-rejecting Automatically disable pools that continually reject shares --http-port Port number to listen on for HTTP getwork miners (-1 means disabled) (default: -1) --expiry|-E Upper bound on how many seconds after getting work we consider a share from it stale (w/o longpoll active) (default: 120) --expiry-lp Upper bound on how many seconds after getting work we consider a share from it stale (with longpoll active) (default: 3600) --failover-only Don't leak work to backup pools when primary pool is lagging --force-dev-init Always initialize devices when possible (such as bitstream 
uploads to some FPGAs) --kernel-path|-K Specify a path to where bitstream and kernel files are (default: "/usr/local/bin") --load-balance Change multipool strategy from failover to quota based balance --log|-l Interval in seconds between log output (default: 5) --log-file|-L Append log file for output messages --log-microseconds Include microseconds in log output --monitor|-m Use custom pipe cmd for output messages --net-delay Impose small delays in networking to avoid overloading slow routers --no-gbt Disable getblocktemplate support --no-getwork Disable getwork support --no-longpoll Disable X-Long-Polling support --no-restart Do not attempt to restart devices that hang --no-stratum Disable Stratum detection --no-submit-stale Don't submit shares if they are detected as stale --no-opencl-binaries Don't attempt to use or save OpenCL kernel binaries --no-unicode Don't use Unicode characters in TUI --noncelog Create log of all nonces found --pass|-p Password for bitcoin JSON-RPC server --per-device-stats Force verbose mode and output per-device statistics --pool-proxy|-x Proxy URI to use for connecting to just the previous-defined pool --protocol-dump|-P Verbose dump of protocol-level activities --queue|-Q Minimum number of work items to have queued (0 - 10) (default: 1) --quiet|-q Disable logging output, display status and errors --real-quiet Disable all output --remove-disabled Remove disabled devices entirely, as if they didn't exist --request-diff Request a specific difficulty from pools (default: 1.0) --retries Number of times to retry failed submissions before giving up (-1 means never) (default: -1) --rotate Change multipool strategy from failover to regularly rotate at N minutes (default: 0) --round-robin Change multipool strategy from failover to round robin on failure --scan|-S Configure how to scan for mining devices --scan-time|-s Upper bound on time spent scanning current work, in seconds (default: 60) --sched-start Set a time of day in HH:MM to start mining (a once off without a stop time) --sched-stop Set a time of day in HH:MM to stop mining (will quit without a start time) --scrypt Use the scrypt algorithm for mining (non-bitcoin) --set-device Set default parameters on devices; eg, NFY:osc6_bits=50 --setuid Username of an unprivileged user to run as --sharelog Append share log to file --shares Quit after mining N shares (default: unlimited) --show-processors Show per processor statistics in summary --skip-security-checks Skip security checks sometimes to save bandwidth; only check 1/th of the time (default: never skip) --socks-proxy Set socks proxy (host:port) for all pools without a proxy specified --stratum-port Port number to listen on for stratum miners (-1 means disabled) (default: -1) --submit-threads Minimum number of concurrent share submissions (default: 64) --syslog Use system log for output messages (default: standard error) --temp-cutoff Maximum temperature devices will be allowed to reach before being disabled, one value or comma separated list --temp-hysteresis Set how much the temperature can fluctuate outside limits when automanaging speeds (default: 3) --temp-target Target temperature when automatically managing fan and clock speeds --text-only|-T Disable ncurses formatted screen output --unicode Use Unicode characters in TUI --url|-o URL for bitcoin JSON-RPC server --user|-u Username for bitcoin JSON-RPC server --verbose Log verbose output to stderr as well as status output --weighed-stats Display statistics weighed to difficulty 1 --userpass|-O 
Username:Password pair for bitcoin JSON-RPC server Options for command line only: --config|-c Load a JSON-format configuration file See example.conf for an example configuration. --help|-h Print this message --version|-V Display version and exit GPU only options: --auto-fan Automatically adjust all GPU fan speeds to maintain a target temperature --auto-gpu Automatically adjust all GPU engine clock speeds to maintain a target temperature --gpu-threads|-g Number of threads per GPU (1 - 10) (default: 2) --gpu-dyninterval Set the refresh interval in ms for GPUs using dynamic intensity (default: 7) --gpu-engine GPU engine (over)clock range in MHz - one value, range and/or comma separated list (e.g. 850-900,900,750-850) --gpu-fan GPU fan percentage range - one value, range and/or comma separated list (e.g. 25-85,85,65) --gpu-map Map OpenCL to ADL device order manually, paired CSV (e.g. 1:0,2:1 maps OpenCL 1 to ADL 0, 2 to 1) --gpu-memclock Set the GPU memory (over)clock in MHz - one value for all or separate by commas for per card. --gpu-memdiff Set a fixed difference in clock speed between the GPU and memory in auto-gpu mode --gpu-platform Select OpenCL platform ID to use for GPU mining --gpu-powertune Set the GPU powertune percentage - one value for all or separate by commas for per card. --gpu-reorder Attempt to reorder GPU devices according to PCI Bus ID --gpu-vddc Set the GPU voltage in Volts - one value for all or separate by commas for per card. --intensity|-I Intensity of GPU scanning (d or -10 -> 10, default: d to maintain desktop interactivity) --kernel|-k Override kernel to use (diablo, poclbm, phatk or diakgcn) - one value or comma separated --no-adl Disable the ATI display library used for monitoring and setting GPU parameters --temp-overheat Overheat temperature when automatically managing fan and GPU speeds (default: 85) --vectors|-v Override detected optimal vector (1, 2 or 4) - one value or comma separated list --worksize|-w Override detected optimal worksize - one value or comma separated list GPU mining is disabled by default for SHA256d if you have any dedicated mining devices, but can be enabled explicitly specifying the -S opencl:auto option. See README.GPU for more information regarding GPU mining. scrypt only options: --lookup-gap Set GPU lookup gap for scrypt mining, comma separated --shaders GPU shaders per card for tuning scrypt, comma separated --thread-concurrency Set GPU thread concurrency for scrypt mining, comma separated See README.scrypt for more information regarding (non-bitcoin) scrypt mining. To use ASICs or FPGAs, you will need to be sure the user BFGMiner is running as has appropriate permissions. This varies by operating system. On Linux, with BFGMiner's udev rules: sudo usermod -a -G video Note that on GNU/Linux systems, you will usually need to login again before group changes take effect. By default, BFGMiner will scan for autodetected devices. If you want to prevent BFGMiner from doing this, you can use "-S noauto". If you want to probe all serial ports, you can use "-S all"; note that this may write data to non-mining devices which may then behave in unexpected ways! On Linux, is usually of the format /dev/ttyUSBn On Mac OS X, is usually of the format /dev/cu.usb* On Windows, is usually of the format \\.\COMn (where n = the correct device number for the device) The official supplied binaries are compiled with support for all ASICs/FPGAs. 
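As an illustration (a sketch only; the serial port shown is hypothetical and will differ per system), a Linux user who wants to probe a single known device rather than rely on autodetection could use:

bfgminer -o http://pool:port -u username -p password -S /dev/ttyUSB0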
To force the code to only attempt detection with a specific driver, prepend the argument with the driver name followed by an "at" symbol. For example, "icarus@/dev/ttyUSB0" or "bitforce@\\.\COM5" or using the short name: "ica@/dev/ttyUSB0" or "bfl@\\.\COM5" Some FPGAs do not have non-volatile storage for their bitstreams and must be programmed every power cycle, including first use. To use these devices, you must download the proper bitstream from the vendor's website and copy it to the "bitstreams" directory into your BFGMiner application directory. See README.ASIC and README.FPGA for more information regarding these. See README.CPU for information regarding CPU mining. --- WHILE RUNNING: The following options are available while running with a single keypress: [M]anage devices [P]ool management [S]ettings [D]isplay options [H]elp [Q]uit M gives you something like: Select processor to manage using up/down arrow keys BFL 0a: 78.0C | 3.64/ 3.70/ 2.91Gh/s | A:46 R:0+0(none) HW: 2/none BitFORCE SHA256 SC from Butterfly Labs Serial: FTWN6T67 [D]isable Or press Enter when done P gives you: Current pool management strategy: Failover [F]ailover only disabled [A]dd pool [R]emove pool [D]isable pool [E]nable pool [C]hange management strategy [S]witch pool [I]nformation S gives you: [L]ongpoll: On [Q]ueue: 1 [S]cantime: 60 [E]xpiry: 120 [R]etries: -1 [W]rite config file [B]FGMiner restart D gives you: [N]ormal [C]lear [S]ilent mode (disable all output) [D]ebug:off [P]er-device:off [Q]uiet:off [V]erbose:off [R]PC debug:off [W]orkTime details:off co[M]pact: off [L]og interval:5 Q quits the application. The running log shows output similar to that below: [2013-02-13 00:26:30] Accepted 1758e8df BFL 0 pool 0 Diff 10/1 [2013-02-13 00:26:32] Accepted 1d9a2199 MMQ 0a pool 0 Diff 8/1 [2013-02-13 00:26:33] Accepted b1304924 ZTX 0 pool 0 Diff 1/1 [2013-02-13 00:26:33] Accepted c3ad22f4 XBS 0b pool 0 Diff 1/1 The 8 byte hex value are the 2nd set of 32 bits from the share submitted to the pool. The 2 diff values are the actual difficulty target that share reached followed by the difficulty target the pool is currently asking for. --- Also many issues and FAQs are covered in the forum threads dedicated to this program, https://bitcointalk.org/?topic=78192 https://bitcointalk.org/?topic=168174 The block display shows: Block: ...1b89f8d3 #217364 Diff:7.67M (54.93Th/s) Started: [17:17:22] This shows a short stretch of the current block, the next block's height and difficulty (including the network hashrate that difficulty represents), and when the search for the new block started. The BFGMiner status line shows: ST:1 F:0 NB:1 AS:0 BW:[ 75/241 B/s] E:2.42 I:12.99mBTC/hr BS:2.71k ST is STaged work items (ready to use). F is network Failure occasions (server down or slow to provide work) NB is New Blocks detected on the network AS is Active Submissions (shares in the process of submitting) BW is BandWidth usage on the network (received/sent) E is Efficiency defined as number of shares accepted (multiplied by their difficulty) per 2 KB of bandwidth I is expected Income, calculated by actual shares submitted in 100% PPS value (assumes Bitcoin, does not account for altcoin conversions!) 
BS is the all time Best Share difficulty you've found. The totals line shows the following: 6/32 75.0C | 171.3/170.8/171.2Gh/s | A:729 R:8+0(.01%) HW:0/.81% Each column is as follows: The number of devices and processors currently mining Hottest temperature reported by any processor 5 second exponentially decaying average hash rate An all time average hash rate An all time average hash rate based on actual nonces found, adjusted for pool reject and stale rate The number of Accepted shares The number of Rejected shares and stale shares discarded (never submitted), and the percentage these are of total found. The number of HardWare errors, and the percentage of invalid nonces returned Each device shows: BFL 2: 74.0C | 51.97/58.90/57.17Gh/s | A:847 R:15+0(.54%) HW:496/.91% Columns are the same as in the totals line. --- MULTIPOOL FAILOVER STRATEGIES WITH MULTIPOOL: A number of different strategies for dealing with multipool setups are available. Each has its advantages and disadvantages so multiple strategies are available by user choice, as per the following list: FAILOVER: The default strategy is failover. This means that if you input a number of pools, it will try to use them as a priority list, moving away from the 1st to the 2nd, 2nd to 3rd and so on. If any of the earlier pools recover, it will move back to the higher priority ones. ROUND ROBIN: This strategy only moves from one pool to the next when the current one falls idle and makes no attempt to move otherwise. ROTATE: This strategy moves at user-defined intervals from one active pool to the next, skipping pools that are idle. LOAD BALANCE: This strategy sends work to all the pools on a quota basis. By default, all pools are allocated equal quotas unless specified with --quota. This apportioning of work is based on work handed out, not shares returned, so is independent of difficulty targets or rejected shares. While a pool is disabled or dead, its quota is dropped until it is re-enabled. Quotas are forward looking, so if the quota is changed on the fly, it only affects future work. If all pools are set to zero quota or all pools with quota are dead, it will fall back to a failover mode. See quota below for more information. The failover-only flag has special meaning in combination with load-balance mode and it will distribute quota back to priority pool 0 from any pools that are unable to provide work for any reason so as to maintain quota ratios between the rest of the pools. BALANCE: This strategy monitors the amount of difficulty 1 shares solved for each pool and uses it as a basis for trying to do the same amount of work for each pool. --- SOLO MINING BFGMiner supports solo mining with any GBT-compatible bitcoin node (such as bitcoind). To use this mode, you need to specify the URL of your bitcoind node using the usual pool options (--url, --userpass, etc), and the --coinbase-addr option to specify the Bitcoin address you wish to receive the block rewards mined. If you are solo mining with more than one instance of BFGMiner (or any other software) per payout address, you must also specify data using the --coinbase-sig option to ensure each miner is working on unique work. Note that this data will be publicly seen if your miner finds a block using any GBT-enabled pool, even when not solo mining (such as failover). If your bitcoin node does not support longpolling (for example, bitcoind 0.8.x), you should consider setting up a failover pool to provide you with block notifications.
Note that solo mining does not use shares, so BFGMiner's adjusted hashrate (third column) may suddenly drop to zero if a block you submit is rejected; this does not indicate that it has stopped mining. Example solo mining usage: bfgminer -o http://localhost:8332 -u username -p password \ --coinbase-addr 1QATWksNFGeUJCWBrN4g6hGM178Lovm7Wh \ --coinbase-sig "rig1: This is Joe's block!" --- QUOTAS The load-balance multipool strategy works off a quota based scheduler. The quotas handed out by default are equal, but the user is allowed to specify any arbitrary ratio of quotas. For example, if all the quota values add up to 100, each quota value will be a percentage, but if 2 pools are specified and pool0 is given a quota of 1 and pool1 is given a quota of 9, pool0 will get 10% of the work and pool1 will get 90%. Quotas can be changed on the fly with RPC, and do not act retrospectively. Setting a quota to zero will effectively disable that pool unless all other pools are disabled or dead. In that scenario, load-balance falls back to regular failover priority-based strategy. While a pool is dead, it loses its quota and no attempt is made to catch up when it comes back to life. To specify quotas on the command line, pools should be specified with a semicolon separated --quota(or -U) entry instead of --url. Pools specified with --url are given a nominal quota value of 1 and entries can be mixed. For example: --url poolA:portA -u usernameA -p passA --quota "2;poolB:portB" -u usernameB -p passB Will give poolA 1/3 of the work and poolB 2/3 of the work. Writing configuration files with quotas is likewise supported. To use the above quotas in a configuration file they would be specified thus: "pools" : [ { "url" : "poolA:portA", "user" : "usernameA", "pass" : "passA" }, { "quota" : "2;poolB:portB", "user" : "usernameB", "pass" : "passB" } ] --- LOGGING BFGMiner will log to stderr if it detects stderr is being redirected to a file. To enable logging simply add 2>logfile.txt to your command line and logfile.txt will contain the logged output at the log level you specify (normal, verbose, debug etc.) In other words if you would normally use: ./bfgminer -o xxx -u yyy -p zzz if you use ./bfgminer -o xxx -u yyy -p zzz 2>logfile.txt it will log to a file called logfile.txt and otherwise work the same. There is also the -m option on linux which will spawn a command of your choice and pipe the output directly to that command. The WorkTime details 'debug' option adds details on the end of each line displayed for Accepted or Rejected work done. 
An example would be: <-00000059.ed4834a3 M:X D:1.0 G:17:02:38:0.405 C:1.855 (2.995) W:3.440 (0.000) S:0.461 R:17:02:47 The first 2 hex codes are the previous block hash, the rest are reported in seconds unless stated otherwise: The previous hash is followed by the getwork mode used M:X where X is one of P:Pool, T:Test Pool, L:LP or B:Benchmark, then D:d.ddd is the difficulty required to get a share from the work, then G:hh:mm:ss:n.nnn, which is when the getwork or LP was sent to the pool and the n.nnn is how long it took to reply, followed by 'O' on its own if it is an original getwork, or 'C:n.nnn' if it was a clone with n.nnn stating how long after the work was recieved that it was cloned, (m.mmm) is how long from when the original work was received until work started, W:n.nnn is how long the work took to process until it was ready to submit, (m.mmm) is how long from ready to submit to actually doing the submit, this is usually 0.000 unless there was a problem with submitting the work, S:n.nnn is how long it took to submit the completed work and await the reply, R:hh:mm:ss is the actual time the work submit reply was received If you start BFGMiner with the --sharelog option, you can get detailed information for each share found. The argument to the option may be "-" for standard output (not advisable with the ncurses UI), any valid positive number for that file descriptor, or a filename. To log share data to a file named "share.log", you can use either: ./bfgminer --sharelog 50 -o xxx -u yyy -p zzz 50>share.log ./bfgminer --sharelog share.log -o xxx -u yyy -p zzz For every share found, data will be logged in a CSV (Comma Separated Value) format: timestamp,disposition,target,pool,dev,thr,sharehash,sharedata For example (this is wrapped, but it's all on one line for real): 1335313090,reject, ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000, http://localhost:8337,GPU0,0, 6f983c918f3299b58febf95ec4d0c7094ed634bc13754553ec34fc3800000000, 00000001a0980aff4ce4a96d53f4b89a2d5f0e765c978640fe24372a000001c5 000000004a4366808f81d44f26df3d69d7dc4b3473385930462d9ab707b50498 f681634a4f1f63d01a0cd43fb338000000000080000000000000000000000000 0000000000000000000000000000000000000000000000000000000080020000 --- RPC API For RPC API details see the README.RPC file --- FAQ Q: Why can't BFGMiner find lib even after I installed it from source code? A: On UNIX-like operating systems, you often need to run one or more commands to reload library caches, such as "ldconfig" or similar. A couple of systems (such as Fedora) ship with /usr/local/lib missing from their library search path. In this case, you can usually add it like this: echo /usr/local/lib >/etc/ld.so.conf.d/local.conf Please note that if your libraries installed into lib64 instead of lib, you should use that in the ld.so config file above instead. Q: BFGMiner segfaults when I change my shell window size. A: Older versions of libncurses have a bug to do with refreshing a window after a size change. Upgrading to a new version of curses will fix it. Q: Can I mine on servers from different networks (eg smartcoin and bitcoin) at the same time? A: No, BFGMiner keeps a database of the block it's working on to ensure it does not work on stale blocks, and having different blocks from two networks would make it invalidate the work from each other. Q: Can I configure BFGMiner to mine with different login credentials or pools for each separate device? A: No such feature has been implemented to support this. 
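Because each record is a single CSV line, the share log is easy to post-process with standard tools. For example (a sketch only, assuming the field layout above and a log file named share.log), this counts shares per disposition:

cut -d, -f2 share.log | sort | uniq -c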
Q: Can I put multiple pools in the config file? A: Yes, check the example.conf file. Alternatively, set up everything either on the command line or via the menu after startup and choose [S]ettings->[W]rite config file and the file will be loaded on each startup. Q: The build fails with "gcc is unable to build a binary". A: Remove the "-march=native" component of your CFLAGS as your version of GCC does not support it. Q: Can you implement feature X? A: I can, but time is limited, and people who donate are more likely to get their feature requests implemented. Q: Work keeps going to my backup pool even though my primary pool hasn't failed? A: BFGMiner checks for conditions where the primary pool is lagging and will pass some work to the backup servers under those conditions. The reason for doing this is to try its absolute best to keep the devices working on something useful and not risk idle periods. You can disable this behaviour with the option --failover-only. Q: Is this a virus? A: As BFGMiner is being packaged with other trojan scripts, some antivirus software is falsely accusing bfgminer.exe of being the actual virus, rather than whatever it is being packaged with. If you installed BFGMiner yourself from a reputable source then you do not have a virus on your computer. Complain to your antivirus software company. They seem to be flagging even source code from BFGMiner as malicious now, even though text source files can't do anything by themselves. Q: Can you modify the display to include more of one thing in the output and less of another, or can you change the quiet mode or can you add yet another output mode? A: Everyone will always have their own view of what is important to monitor. The defaults are very sane and I have very little interest in changing this any further. Q: Why is my efficiency above/below 1.00? A: Efficiency simply means how many shares you return for the amount of bandwidth used. It does not correlate with efficient use of your hardware, and is a measure of a combination of hardware speed, block luck, pool design and many other factors. Q: What are the best parameters to pass for X pool/hardware/device? A: Virtually always, the DEFAULT parameters give the best results. Most user defined settings lead to worse performance. Q: What happened to CPU mining? A: See README.CPU for more information. Q: Is there a GUI version? A: Yes, Nate Woolls maintains a GUI interface for BFGMiner called MultiMiner, available at http://multiminerapp.com Q: I'm having an issue. What debugging information should I provide? A: Start BFGMiner with your regular commands and add -D -T --verbose and provide the full startup output and a summary of your hardware, operating system, and if applicable, ATI driver version and ATI stream version. Q: Can I mine with BFGMiner on a Mac? A: BFGMiner will compile on OS X, but the performance of GPU mining is compromised due to the OpenCL implementation on OS X; there is no temperature or fanspeed monitoring and the cooling design of most Macs, despite having powerful GPUs, will usually not cope with constant usage leading to a high risk of thermal damage. It is highly recommended not to mine on a Mac unless it is with an external USB device. Q: My network gets slower and slower and then dies for a minute? A: Try the --net-delay option if you are on a getwork or GBT server. Q: How do I tune for P2Pool?
A: P2Pool has very rapid expiration of work and new blocks, it is suggested you decrease intensity by 1 from your optimal value, and decrease GPU threads to 1 with -g 1. It is also recommended to use --failover-only since the work is effectively like a different block chain. If mining with a Mini Rig, it is worth adding the --bfl-range option. Q: Are OpenCL kernels from other mining software useable in BFGMiner? A: No, the APIs are slightly different between the different software and they will not work. Q: I run PHP on windows to access the API with the example miner.php. Why does it fail when php is installed properly but I only get errors about Sockets not working in the logs? A: Please check http://us.php.net/manual/en/sockets.installation.php Q: What is a PGA? A: At the moment, BFGMiner supports 5 FPGAs: BitForce, Icarus, ModMiner, X6500, and ZTEX. They are Field-Programmable Gate Arrays that have been programmed to do Bitcoin mining. Since the acronym needs to be only 3 characters, the "Field-" part has been skipped. "PGA" is also used for devices built with Application-Specific Integrated Circuits (ASICs). Q: What is an ASIC? A: They are Application Specific Integrated Circuit devices and provide the highest performance per unit power due to being dedicated to only one purpose. Q: How do I get my BFL/Icarus/Lancelot/Cairnsmore device to auto-recognise? A: On Linux, if the /dev/ttyUSB* devices don't automatically appear, the only thing that needs to be done is to load the driver for them: BitForce: sudo modprobe ftdi_sio vendor=0x0403 product=0x6014 Erupter: sudo modprobe cp210x vendor=0x10c4 product=0xea60 Icarus: sudo modprobe pl2303 vendor=0x067b product=0x0230 Lancelot: sudo modprobe ftdi_sio vendor=0x0403 product=0x6001 Cairnsmore: sudo modprobe ftdi_sio vendor=0x0403 product=0x8350 On some systems you must manally install the driver required for the device. OpenWrt drivers (install with opkg): FTDI: kmod-usb-serial-ftdi Erupter: kmod-usb-serial-cp210x Icarus: kmod-usb-serial-pl2303 Windows drivers: FTDI: http://www.ftdichip.com/Drivers/VCP.htm Erupter: http://www.silabs.com/products/mcu/pages/usbtouartbridgevcpdrivers.aspx Icarus: http://prolificusa.com/pl-2303hx-drivers/ Q: I ran cgminer, and now BFGMiner doesn't work! A: cgminer has its own non-standard implementations of the drivers for most USB devices, and requires you to replace the official drivers with WinUSB on Windows (usually using Zadig). Before you can use BFGMiner, you will need to restore the original driver. Uninstalling the device (and WinUSB driver) from Device Manager and re-plugging it will usually trigger driver re-installation to the default drivers. Q: On Linux I can see the /dev/ttyUSB* devices, but BFGMiner can't mine on them? A: Make sure you have the required privileges to access the /dev/ttyUSB* devices: sudo ls -las /dev/ttyUSB* will give output like: 0 crw-rw---- 1 root video 188, 0 2012-09-11 13:49 /dev/ttyUSB0 This means your account must have the group 'video' or root privileges. To permanently give your account the 'video' group: sudo usermod -G video -a `whoami` Then logout and back in again. Q: Can I mine scrypt with FPGAs or ASICs? A: Currently no. Bitcoin ASICs are only useful for SHA256d systems and FPGAs generally aren't designed to handle scrypt efficiently. Q: Why does BFGMiner show difficulty 0 when mining scrypt? A: BFGMiner consistently uses pdiff measurement for difficulty everywhere, rather than other measurements that may exist. 
For scrypt, pdiff 1 is very difficult, and higher get exponentially harder. It is unlikely you will want to use pdiff 1+ with scrypt until you have FPGAs and/or ASICs for it. Q: What is stratum and how do I use it? A: Stratum is a protocol designed to reduce resources for mining pools at the cost of keeping the miner in the dark and blindly transferring his mining authority to the pool. It is a return to the problems of the old centralized "getwork" protocol, but capable of scaling to hardware of any speed like the standard GBT protocol. If a pool uses stratum instead of GBT, BFGMiner will automatically detect it and switch to the support as advertised if it can. Stratum uses direct TCP connections to the pool and thus it will NOT currently work through a http proxy but will work via a socks proxy if you need to use one. If you input the stratum port directly into your configuration, or use the special prefix "stratum+tcp://" instead of "http://", BFGMiner will ONLY try to use stratum protocol mining. Q: Why don't the statistics add up: Accepted, Rejected, Stale, Hardware Errors, Diff1 Work, etc. when mining greater than 1 difficulty shares? A: As an example, if you look at 'Difficulty Accepted' in the RPC API, the number of difficulty shares accepted does not usually exactly equal the amount of work done to find them. If you are mining at 8 difficulty, then you would expect on average to find one 8 difficulty share, per 8 single difficulty shares found. However, the number is actually random and converges over time as it is an average, not an exact value, thus you may find more or less than the expected average. --- This code is provided entirely free of charge by the programmer in his spare time so donations would be greatly appreciated. Please consider donating to the address below. Luke-Jr 1QATWksNFGeUJCWBrN4g6hGM178Lovm7Wh bfgminer-bfgminer-3.10.0/README.ASIC000066400000000000000000000166401226556647300166000ustar00rootroot00000000000000SUPPORTED DEVICES Currently supported ASIC devices include Avalon, Bitfountain's Block Erupter series (both USB and blades), a large variety of Bitfury-based miners, Butterfly Labs' SC range of devices, HashBuster boards, Klondike modules, and KnCminer's Mercury, Jupiter and Saturn. AVALON ------ Currently, Avalon boards are supported only by connecting them directly (or via a hub) to a regular PC running BFGMiner. It is also possible to install the OpenWrt packages of BFGMiner to the Avalon's embedded controller, but this is not a simple task due to its lack of available flash space. To use the Avalon from a regular PC, you will need to specify two options: First, add the -S option specifying the avalon driver specifically. For example, -S avalon:\\.\COM9 Next, use the --avalon-options copying the command as used by the internal router used by the Avalon. eg: --avalon-options 115200:24:10:45:282 The values are baud : miners : asic count : timeout : frequency. Baud: The device is essentially hard coded to emulate 115200 baud so you shouldn't change this. Miners: Most Avalons are 3 module devices, which come to 24 miners. 4 module devices would use 32 here. Asic count: Virtually all have 10, so don't change this. Timeout: This defines how long the device will work on a work item before accepting new work to replace it. It should be changed according to the frequency (last setting). It is possible to set this a little lower if you are trying to tune for short block mining (eg p2pool) but much lower and the device will start creating duplicate shares. 
Sample settings for valid different frequencies (last 2 values): 34:375 36:350 39:325 43:300 45:282 47:270 50:256 Frequency: This is the clock speed of the devices. Only specific values work, 256, 270, 282 (default), 300, 325, 350 and 375. If you use the full curses based interface with Avalons you will get this information: AVA 0: 22/ 46C 60%/2400R The values are: ambient temp / highest device temp set fan % / lowest detected fan RPM. Check the API for more detailed information. BFSB, MEGABIGPOWER, AND METABANK BITFURY BOARDS ----------------------------------------------- Both BFSB and MegaBigPower (V2 only at this time) boards are supported with the "bfsb" driver. Metabank boards are supported with the "metabank" driver. These drivers are not enabled by default, since they must be run on a Raspberry Pi in a specific hardware configuration with the boards. To enable them, you must build with --enable-bfsb or --enable-metabank. Do not try to use these drivers without the manufacturer-supported hardware configuration! Also note that these drivers do not properly support thermal shutdown at this time, and without sufficient cooling you may destroy your board or chips! To start BFGMiner, ensure your Raspberry Pi's SPI is enabled (you can run the raspi-config utility for this). For Metabank boards, you must also load the I2C drivers (do not try to modprobe both with a single command; it won't work): modprobe i2c-bcm2708 modprobe i2c-dev Then you must run BFGMiner as root, with the proper driver selected. For example: sudo bfgminer -S bfsb:auto BI*FURY ------- Bi*Fury should just work; you may need to use -S bifury: On Windows, you will need to install the standard USB CDC driver for it. http://store.bitcoin.org.pl/support If you want to upgrade the firmware, unplug your device. You will need to temporarily short a circuit. With the USB connector pointing forward, and the heatsink down, look to the forward-right; you will see two tiny lights, a set of 2 terminals, and a set of 3 terminals. The ones you need to short are the set of 2. With them shorted, plug the device back into your computer. It will then pretend to be a mass storage disk drive. If you use Windows, you can play along and just overwrite the firmware.bin file. If you use Linux, you must use mcopy: mcopy -i /dev/disk/by-id/usb-NXP_LPC1XXX_IFLASH_ISP-0:0 firmware.bin \ ::/firmware.bin After this is complete, unplug the device again and un-short the 2 terminals. This completes the upgrade and you can now plug it back in and start mining. BIG PICTURE MINING BITFURY USB ------------------------------ These miners are sensitive to unexpected data. Usually you can re-plug them to reset to a known-good initialisation state. To ensure they are properly detected and used with BFGMiner, you must specify -S bigpic:all (or equivalent) options prior to any other -S options (which might probe the device and confuse it). BLOCK ERUPTER BLADE ------------------- Blades communicate over Ethernet using the old but simple getwork mining protocol. If you build BFGMiner with libmicrohttpd, you can have it work with one or more blades. First, start BFGMiner with the --http-port option. For example: bfgminer --http-port 8330 Then configure your blade to connect to your BFGMiner instance on the same port, with a unique username per blade. It will then show up as a SGW device and should work more or less like any other miner. 
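In practice you will also give BFGMiner the usual pool options so it has work to hand out to the blades; a fuller invocation might look like this (a sketch only; the pool details are placeholders):

bfgminer -o http://pool:port -u username -p password --http-port 8330

Each blade is then pointed at the address of the machine running BFGMiner on port 8330, each with its own unique worker name.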
BLOCK ERUPTER USB ----------------- These will autodetect if supported by the device; otherwise, you need to use the '--scan-serial erupter:' option to tell BFGMiner what device to probe; if you know you have no other serial devices, or only ones that can tolerate garbage, you can use '--scan-serial erupter:all' to probe all serial ports. They communicate with the Icarus protocol, which has some additional options in README.FPGA KLONDIKE -------- --klondike-options Set klondike options clock:temptarget KNCMINER -------- The KnC miner uses a BeagleBoneBlack(BBB) as the host, this is pluged into a cape that holds the FPGA and connections for 4-6 ASICS depending on the cape version. The BBB runs the Angstrom linux distribution, the following is a step by step install for BFGMiner on this distro; -----------------Start------------ cat >/etc/opkg/feeds.conf <<\EOF src/gz noarch http://feeds.angstrom-distribution.org/feeds/v2013.06/ipk/eglibc/all/ src/gz base http://feeds.angstrom-distribution.org/feeds/v2013.06/ipk/eglibc/cortexa8hf-vfp-neon/base/ src/gz beaglebone http://feeds.angstrom-distribution.org/feeds/v2013.06/ipk/eglibc/cortexa8hf-vfp-neon/machine/beaglebone/ EOF opkg update opkg install angstrom-feed-configs rm /etc/opkg/feeds.conf opkg update opkg install update-alternatives opkg install automake autoconf make gcc cpp binutils git less pkgconfig-dev ncurses-dev libtool nano bash i2c-tools-dev while ! opkg install libcurl-dev; do true; done ln -s aclocal-1.12 /usr/share/aclocal curl http://www.digip.org/jansson/releases/jansson-2.0.1.tar.bz2 | tar -xjvp cd jansson-2.0.1 ./configure --prefix=/usr CC=arm-angstrom-linux-gnueabi-gcc --disable-static NM=arm-angstrom-linux-gnueabi-nm make install && ldconfig cd .. git clone git://github.com/luke-jr/bfgminer cd bfgminer ./autogen.sh git clone git://github.com/troydhanson/uthash ./configure --host=arm-angstrom-linux-gnueabi --enable-knc CFLAGS="-I$PWD/uthash/src -O0 -ggdb" make AR=arm-angstrom-linux-gnueabi-ar /etc/init.d/cgminer.sh stop ./bfgminer -S knc:auto -c /config/cgminer.conf ---------------END------------- BFGMiner has also been incorporated into an unofficial firmware by uski01 called Bertmod this can be found on the kncminer forum. --- This code is provided entirely free of charge by the programmer in his spare time so donations would be greatly appreciated. Please consider donating to the address below. Luke-Jr 1QATWksNFGeUJCWBrN4g6hGM178Lovm7Wh bfgminer-bfgminer-3.10.0/README.CPU000066400000000000000000000047111226556647300165040ustar00rootroot00000000000000EXECUTIVE SUMMARY ON CPU USAGE: By default, BFGMiner will NOT mine on CPUs unless it is explicitly compiled with support and told to do so. CPU mining for bitcoin is generally considered to be obsolete as it requires considerably more power per hash computed than either GPU, FPGA or ASIC based mining. If you still wish to mine using CPUs you will need to build a custom binary with support enabled (refer to the build notes in README for further information). 
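For example, a typical build from a git checkout with CPU mining enabled might look like this (a sketch using the --enable-cpumining flag documented in the README build notes):

./autogen.sh
./configure --enable-cpumining
make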
The following CPU mining options are available: --algo|-a Specify sha256 implementation for CPU mining: fastauto* Quick benchmark at startup to pick a working algorithm auto Benchmark at startup and pick fastest algorithm c Linux kernel sha256, implemented in C 4way tcatm's 4-way SSE2 implementation via VIA padlock implementation cryptopp Crypto++ C/C++ implementation cryptopp_asm32 Crypto++ 32-bit assembler implementation sse2_32 SSE2 32 bit implementation for i386 machines sse2_64 SSE2 64 bit implementation for x86_64 machines sse4_64 SSE4.1 64 bit implementation for x86_64 machines altivec_4way Altivec implementation for PowerPC G4 and G5 machines --cpu-threads|-t Number of miner CPU threads (default: 4) CPU FAQ: Q: What happened to CPU mining? A: Being increasingly irrelevant for most users, and a maintenance issue, it is no longer under active development and will not be supported unless someone steps up to help maintain it. No binary builds supporting CPU mining will be released for Windows but CPU mining can be built into BFGMiner when it is compiled. For builds which do support CPU mining, it is still disabled by default, and must be enabled using the -S cpu:auto option. Q: So, should I even try CPU mining? A: No, it honestly will waste more power and time than it is worth at this stage. If you really wish to mine, you are better off getting a suitable ASIC instead. Q: But, if the computers aren't mine and I'm not paying for the power? A: If you are trying to use computers that aren't yours, you really should reconsider. Attempting to mine with a large number of devices will often impact negatively on many pools and will likely pay almost nothing due to the small number of successful shares submitted. Apart from that, you will probably get into trouble with someone down the track for misusing the computers. bfgminer-bfgminer-3.10.0/README.Debian000066400000000000000000000015661226556647300172440ustar00rootroot00000000000000Debian packaging notes by Graeme Humphries : Here's the short list on how to build your own Debian/Ubuntu package: * Install and configure pbuilder (https://wiki.ubuntu.com/PbuilderHowto) * Create the Debian specific bitforce module config from its patch: patch -p1 < debian/patches/bfgminer-bitforce.diff * Build the package using pbuilder: pdebuild * The built package should end up in /var/cache/pbuilder/result Debian packaging depends on new release versions being entered into debian/changelog. The versions in that file need the Debian/Ubuntu release specific suffix (0precise1) as all packages go into a shared pool, and so they need to have distinct names for different releases. When a new release happens, just add it with some brief changelog notes to that file, and it should be ready to generate a new source package and upload to the Launchpad PPA. bfgminer-bfgminer-3.10.0/README.FPGA000066400000000000000000000334211226556647300165720ustar00rootroot00000000000000 This README contains extended details about FPGA mining with BFGMiner ModMiner (MMQ) -------------- ModMiner does not have any persistent storage for bitstreams, so BFGMiner must upload it after power on. For this to work, you must first download the necessary bitstream file to BFGMiner's "bitstreams" directory, and give it the name "fpgaminer_x6500-overclocker-0402.bit". You can download this bitstream from FPGA Mining LLC's website: http://www.fpgamining.com/documentation/firmware - If the MMQ doesn't respond to BFGMiner at all, or the red LED isn't flashing then you will need to reset the MMQ. 
The red LED should always be flashing when it is mining or ready to mine. To reset the MMQ, you are best to press the left "RESET" button on the backplane, then unplug and replug the USB cable. If your MMQ doesn't have a button on the "RESET" pad, you need to join the two left pads of the "RESET" pad with conductive wire to reset it. Cutting a small (metal) paper-clip in half works well for this. Then unplug the USB cable, wait for 5 seconds, then plug it back in. After you press reset, the red LED near the USB port should blink continuously. If it still won't work, power off, wait for 5 seconds, then power on the MMQ. This of course means it will upload the bitstream again when you start BFGMiner. - Device 0 is on the power end of the board. - You must make sure you have an appropriate firmware in your MMQ. Read here for official details of changing the firmware: http://wiki.btcfpga.com/index.php?title=Firmware The basics of changing the firmware are: You need two short pieces of conductive wire if your MMQ doesn't have buttons on the "RESET" and "ISP" pads on the backplane board. Cutting a small (metal) paper-clip in half works well for this. Join the 2 left pads of the "RESET" pad with wire and the led will dim. Without disconnecting the "RESET", join the 2 left pads of the "ISP" pad with a wire and it will stay dim. Release "RESET" then release "ISP" and it should still be dim. Unplug the USB and when you plug it back in it will show up as a mass storage device. Linux: (as one single line): mcopy -i /dev/disk/by-id/usb-NXP_LPC134X_IFLASH_ISP000000000-0:0 modminer091012.bin ::/firmware.bin Windows: delete the MSD device file firmware.bin and copy in the new one, renaming the new file to the same name 'firmware.bin' Disconnect the USB correctly (so writes are flushed first) Join and then disconnect "RESET" and then plug the USB back in and it's done. Best to update to one of the latest 2 listed below if you don't already have one of them in your MMQ. The current latest different firmware are: Latest for support of normal or TLM bitstream: http://btcfpga.com/files/firmware/modminer092612-TLM.bin Latest with only normal bitstream support (Temps/HW Fix): http://btcfpga.com/files/firmware/modminer091012.bin The code is currently tested on the modminer091012.bin firmware. This comment will be updated when others have been tested. - On many Linux distributions there is an app called modem-manager that may cause problems when it is enabled, due to opening the MMQ device and writing to it. The problem will typically present itself by the flashing led on the backplane going out (no longer flashing) and it takes a power cycle to re-enable the MMQ firmware - which then can lead to the problem reoccurring. You can either disable/uninstall modem-manager if you don't need it or: a (hack) solution to this is to blacklist the MMQ USB device in /lib/udev/rules.d/77-mm-usb-device-blacklist.rules Adding 2 lines like this (just above APC) should help. # MMQ ATTRS{idVendor}=="1fc9", ATTRS{idProduct}=="0003", ENV{ID_MM_DEVICE_IGNORE}="1" The change will be lost and need to be re-done next time you update the modem-manager software. BitForce (BFL) -------------- --bfl-range Use nonce range on BitForce devices if supported This option is only for BitForce devices. Earlier devices such as the single did not have any way of doing small amounts of work which meant that a lot of work could be lost across block changes.
Some of the Mini Rigs have support for doing this, so less work is lost across a longpoll. However, it comes at a cost of 1% in overall hashrate so this feature is disabled by default. It is only recommended you enable this if you are mining with a Mini Rig on P2Pool. BFGMiner also bundles a bitforce-firmware-flash utility on Linux. Using this, you can change the bitstream firmware on BitForce Singles. It is untested with other devices. Use at your own risk! Windows users may use Butterfly Labs EasyMiner to change firmware. To compile: make bitforce-firmware-flash To flash your BFL, specify the BFL port and the flash file e.g.: sudo ./bitforce-firmware-flash /dev/ttyUSB0 alphaminer_832.bfl It takes a bit under 3 minutes to flash a BFL and shows a progress % counter Once it completes, you may also need to wait about 15 seconds, then power the BFL off and on again. If you get an error at the end of the BFL flash process stating: "Error reading response from ZBX" it may have worked successfully anyway. Test mining on it to be sure if it worked or not. You need to give BFGMiner about 10 minutes mining with the BFL to be sure of the Mh/s value reported with the changed firmware - and the MH/s reported will be less than the firmware speed since you lose work on every block change. Icarus (ICA) ------------ There are two hidden options in BFGMiner when Icarus support is compiled in: --icarus-options Set specific FPGA board configurations - one set of values for all or comma separated baud:work_division:fpga_count:quirks baud The Serial/USB baud rate - 115200 or 57600 only - default 115200 work_division The fraction of work divided up for each FPGA chip - 1, 2, 4 or 8 e.g. 2 means each FPGA does half the nonce range - default 2 fpga_count The actual number of FPGA working - this would normally be the same as work_division - range is from 1 up to 'work_division' It defaults to the value of work_division - or 2 if you don't specify work_division quirks List of quirks to enable and disable (after a minus sign): r Reopen device regularly to workaround buggy Icarus USB chipset (enabled by default) If you define fewer comma separated values than Icarus devices, the last values will be used for all extra devices. An example would be: --icarus-options 57600:2:1:-r This would mean: use 57600 baud, the FPGA board divides the work in half however only 1 FPGA actually runs on the board, and don't reopen the device (e.g. like an early CM1 Icarus copy bitstream). --icarus-timing Set how the Icarus timing is calculated - one setting/value for all or comma separated default[=N] Use the default Icarus hash time (2.6316ns) short=[N] Calculate the hash time and stop adjusting it at ~315 difficulty 1 shares (~1hr) long=[N] Re-calculate the hash time continuously value[=N] Specify the hash time in nanoseconds (e.g. 2.6316) and abort time (e.g. 2.6316=80) If you define fewer comma separated values than Icarus devices, the last values will be used for all extra devices. Icarus timing is required for devices that do not exactly match a default Icarus Rev3 in processing speed. If you have an Icarus Rev3 you should not normally need to use --icarus-timing since the default values will maximise the Mh/s and display it correctly. Icarus timing is used to determine the number of hashes that have been checked when it aborts a nonce range (including on a longpoll). It is also used to determine the elapsed time when it should abort a nonce range to avoid letting the Icarus go idle, but also to safely maximise that time. 
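As an illustration (a sketch only; the values are hypothetical and should be tuned for your own hardware), a non-standard board with a single working FPGA might be started with something like:

--icarus-options 115200:1:1 --icarus-timing long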
'short' or 'long' mode should only be used on a computer that has enough CPU available to run BFGMiner without any CPU delays (an active desktop or swapping computer would not be stable enough). Any CPU delays while calculating the hash time will affect the result 'short' mode only requires the computer to be stable until it has completed ~315 difficulty 1 shares, 'long' mode requires it to always be stable to ensure accuracy, however, over time it continually corrects itself. The optional additional =N for 'short' or 'long' specifies the limit to set the timeout to in deciseconds; thus if the timing code calculation is higher while running, it will instead use the limit. This can be set to the appropriate value to ensure the device never goes idle even if the calculation is negatively affected by system performance. When in 'short' or 'long' mode, it will report the hash time value each time it is re-calculated. In 'short' or 'long' mode, the scan abort time starts at 5 seconds and uses the default 2.6316ns scan hash time, for the first 5 nonces or one minute (whichever is longer). In 'default' or 'value' mode the 'constants' are calculated once at the start, based on the default value or the value specified. The optional additional =N specifies to set the default abort at N 1/10ths of a second, not the calculated value, which is 112 for 2.6316ns To determine the hash time value for a non Icarus Rev3 device or an Icarus Rev3 with a different bitstream to the default one, use 'long' mode and give it at least a few hundred shares, or use 'short' mode and take note of the final hash time value (Hs) calculated. You can also use the RPC API 'stats' command to see the current hash time (Hs) at any time. The Icarus code currently only works with an FPGA device that supports the same commands as Icarus Rev3 requires and also is less than ~840Mh/s and greater than 2Mh/s. If an FPGA device does hash faster than ~840Mh/s it should work correctly if you supply the correct hash time nanoseconds value. The timing code itself will affect the Icarus performance since it increases the delay after work is completed or aborted until it starts again. The increase is, however, extremely small and the actual increase is reported with the RPC API 'stats' command (a very slow CPU will make it more noticeable). Using the 'short' mode will remove this delay after 'short' mode completes. The delay doesn't affect the calculation of the correct hash time. X6500 ----- Since X6500 FPGAs do not use serial ports for communication, the --scan-serial option instead works with product serial numbers. By default, any devices with the X6500 USB product id will be used, but some X6500s may have shipped without this product id being configured. If you have any of these, you will need to specify their serial numbers explicitly, and also add -S x6500:auto if you still want to use the autodetection for other properly-configured FPGAs. The serial number of X6500s is usually found on a label applied to the ATX power connector slot. If yours is missing, devices seen by the system can be displayed by starting bfgminer in debug mode. To get a simple list of devices, with the debug output shown, you can use: bfgminer -D -d? -T X6500 does not have any persistent storage for bitstreams, so BFGMiner must upload it after power on. For this to work, you must first download the necessary bitstream file to BFGMiner's "bitstreams" directory, and give it the name "fpgaminer_x6500-overclocker-0402.bit". 
You can download this bitstream from FPGA Mining LLC's website: http://www.fpgamining.com/documentation/firmware ZTEX FPGA Boards ---------------- http://www.ztex.de sells two boards suitable for mining: the 1.15x with 1 FPGA and the 1.15y with 4 FPGAs. ZTEX distributes their own mining software and drivers. BFGMiner has full support for these boards, as long as they have at least the "dummy" mining bitstreams installed on them. If your boards do not have a mining bitstream yet, you must first, install ZTEX's BTCMiner (requires Java JDK version 6 or later) and install one. === WINDOWS NOTE === Upon first powering up and connecting the board via USB, windows will attempt and fail to find the appropriate drivers. To load the initial firmware on the board, you'll need the EZ-USB FX2 SDK from here: http://www.ztex.de/downloads/#firmware_kit Extract the firmware kit and use the driver within libusb-win32/ztex.inf. Windows should now recognize the board and you're ready to program it. === END OF WINDOWS === Grab the latest miner jar from http://www.ztex.de/btcminer/#download and program the appropriate dummy firmware for your board. The command should look something like (for a single FPGA board): java -cp ZtexBTCMiner-120417.jar BTCMiner -m p -f **FILENAME** -s 01-02-01 For ZTEX 1.15x boards, the dummy bitstream filename is ztex_ufm1_15d.ihx For ZTEX 1.15y boards, the dummy bitstream filename is ztex_ufm1_15y.ihx === WINDOWS NOTE === To mine using BFGMiner, you'll have to swap the USB drivers. The BFGMiner- compatible WinUSB drivers for the board can be generated with this tool: http://sourceforge.net/projects/libwdi/files/zadig/ Basic usage instructions for Zadig can be found here: https://github.com/pbatard/libwdi/wiki/Zadig Once Zadig generates and installs a WinUSB driver, ensure everything is working by running: bfgminer -D -d? -T You should see something like this in the output: [2013-01-22 20:19:11] Found 1 ztex board [2013-01-22 20:19:11] ZTX 0: Found Ztex (ZTEX 0001-02-01-1) === END OF WINDOWS === If you have installed a dummy bitstream, you will now need to copy the main mining bitstream where BFGMiner can find it. This are usually the same as the dummy bitstream filename, but with a number added to the end. Extract the ZtexBTCMiner-120417.jar file using any unzip utility, and look for the proper *.ihx and *.bit files (the latter will be inside the 'fpga' directory of the jar). Copy them to BFGMiner's "bitstreams" directory, and you're ready to start mining. bfgminer-bfgminer-3.10.0/README.GPU000066400000000000000000000556171226556647300165230ustar00rootroot00000000000000EXECUTIVE SUMMARY ON GPU USAGE (SEE ALSO README.scrypt FOR SCRYPT MINING): By default, BFGMiner will NOT mine on any GPUs. If you wish to use your GPU to mine (generally not a good idea), you can explicitly enable it with the -S opencl:auto option. Single pool, regular desktop: bfgminer -S opencl:auto -o http://pool:port -u username -p password If you have configured your system properly, BFGMiner will mine on all GPUs in "dynamic" mode which is designed to keep your system usable and sacrifice some mining performance. 
Single pool, dedicated miner: bfgminer -S opencl:auto -o http://pool:port -u username -p password -I 9 Single pool, first card regular desktop, 3 other dedicated cards: bfgminer -S opencl:auto -o http://pool:port -u username -p password -I d,9,9,9 Multiple pool, dedicated miner: bfgminer -S opencl:auto -o http://pool1:port -u pool1username -p pool1password -o http://pool2:port -u pool2usernmae -p pool2password -I 9 Add overclocking settings, GPU and fan control for all cards: bfgminer -S opencl:auto -o http://pool:port -u username -p password -I 9 --auto-fan --auto-gpu --gpu-engine 750-950 --gpu-memclock 300 Add overclocking settings, GPU and fan control with different engine settings for 4 cards: bfgminer -S opencl:auto -o http://pool:port -u username -p password -I 9 --auto-fan --auto-gpu --gpu-engine 750-950,945,700-930,960 --gpu-memclock 300 READ WARNINGS AND DOCUMENTATION BELOW ABOUT OVERCLOCKING To configure multiple displays on linux you need to configure your Xorg cleanly to use them all: sudo aticonfig --adapter=all -f --initial On Linux you virtually always need to export your display settings before starting to get all the cards recognised and/or temperature+clocking working: export DISPLAY=:0 --- SETUP FOR GPU SUPPORT: To setup GPU mining support: Install the AMD APP sdk, ideal version (see FAQ!) - put it into a system location. Download the correct version for either 32 bit or 64 bit from here: http://developer.amd.com/tools/heterogeneous-computing/amd-accelerated-parallel-processing-app-sdk/downloads/ The best version for Radeon 5xxx and 6xxx is v2.5, while 7xxx cards need v2.6 or later, 2.7 seems the best. For versions 2.4 or earlier you will need to manually install them: This will give you a file with a name like: AMD-APP-SDK-v2.4-lnx64.tgz (64-bit) or AMD-APP-SDK-v2.4-lnx32.tgz (32-bit) Then: sudo -i cd /opt tar xf /path/to/AMD-APP-SDK-v2.4-lnx##.tgz cd / tar xf /opt/AMD-APP-SDK-v2.4-lnx##/icd-registration.tgz ln -s /opt/AMD-APP-SDK-v2.4-lnx##/include/CL /usr/include ln -s /opt/AMD-APP-SDK-v2.4-lnx##/lib/x86_64/* /usr/lib/ ldconfig Where ## is 32 or 64, depending on the bitness of the SDK you downloaded. If you are on 32 bit, x86_64 in the 2nd last line should be x86 --- INTENSITY INFORMATION: Intensity correlates with the size of work being submitted at any one time to a GPU. The higher the number the larger the size of work. Generally speaking finding an optimal value rather than the highest value is the correct approach as hash rate rises up to a point with higher intensities but above that, the device may be very slow to return responses, or produce errors. NOTE: Running intensities above 9 with current hardware is likely to only diminish return performance even if the hash rate might appear better. A good starting baseline intensity to try on dedicated miners is 9. 11 is the upper limit for intensity while Bitcoin mining, if the GPU_USE_SYNC_OBJECTS variable is set (see FAQ). The upper limit for SHA256d mining is 14 and 20 for scrypt. --- OVERCLOCKING WARNING AND INFORMATION AS WITH ALL OVERCLOCKING TOOLS YOU ARE ENTIRELY RESPONSIBLE FOR ANY HARM YOU MAY CAUSE TO YOUR HARDWARE. OVERCLOCKING CAN INVALIDATE WARRANTIES, DAMAGE HARDWARE AND EVEN CAUSE FIRES. THE AUTHOR ASSUMES NO RESPONSIBILITY FOR ANY DAMAGE YOU MAY CAUSE OR UNPLANNED CHILDREN THAT MAY OCCUR AS A RESULT. The GPU monitoring, clocking and fanspeed control incorporated into BFGMiner comes through use of the ATI Display Library. As such, it only supports ATI GPUs. 
Even if ADL support is successfully built into BFGMiner, unless the card and driver supports it, no GPU monitoring/settings will be available. BFGMiner supports initial setting of GPU engine clock speed, memory clock speed, voltage, fanspeed, and the undocumented powertune feature of 69x0+ GPUs. The setting passed to BFGMiner is used by all GPUs unless separate values are specified. All settings can all be changed within the menu on the fly on a per-GPU basis. For example: --gpu-engine 950 --gpu-memclock 825 will try to set all GPU engine clocks to 950 and all memory clocks to 825, while: --gpu-engine 950,945,930,960 --gpu-memclock 300 will try to set the engine clock of card 0 to 950, 1 to 945, 2 to 930, 3 to 960 and all memory clocks to 300. You can substitute 0 to leave the engine clock of a card at its default. For example, to keep the 2nd GPU to its default clocks: --gpu-engine 950,0,930,960 --gpu-memclock 300,0,300,300 AUTO MODES: There are two "auto" modes in BFGMiner, --auto-fan and --auto-gpu. These can be used independently of each other and are complementary. Both auto modes are designed to safely change settings while trying to maintain a target temperature. By default this is set to 75 degrees C but can be changed with: --temp-target e.g. --temp-target 80 Sets all cards' target temperature to 80 degrees. --temp-target 75,85 Sets card 0 target temperature to 75, and card 1 to 85 degrees. AUTO FAN: e.g. --auto-fan (implies 85% upper limit) --gpu-fan 25-85,65 --auto-fan Fan control in auto fan works off the theory that the minimum possible fan required to maintain an optimal temperature will use less power, make less noise, and prolong the life of the fan. In auto-fan mode, the fan speed is limited to 85% if the temperature is below "overheat" intentionally, as higher fanspeeds on GPUs do not produce signficantly more cooling, yet significantly shorten the lifespan of the fans. If temperature reaches the overheat value, fanspeed will still be increased to 100%. The overheat value is set to 85 degrees by default and can be changed with: --temp-overheat e.g. --temp-overheat 75,85 Sets card 0 overheat threshold to 75 degrees and card 1 to 85. AUTO GPU: e.g. --auto-gpu --gpu-engine 750-950 --auto-gpu --gpu-engine 750-950,945,700-930,960 GPU control in auto gpu tries to maintain as high a clock speed as possible while not reaching overheat temperatures. As a lower clock speed limit, the auto-gpu mode checks the GPU card's "normal" clock speed and will not go below this unless you have manually set a lower speed in the range. Also, unless a higher clock speed was specified at startup, it will not raise the clockspeed. If the temperature climbs, fanspeed is adjusted and optimised before GPU engine clockspeed is adjusted. If fan speed control is not available or already optimal, then GPU clock speed is only decreased if it goes over the target temperature by the hysteresis amount, which is set to 3 by default and can be changed with: --temp-hysteresis If the temperature drops below the target temperature, and engine clock speed is not at the highest level set at startup, BFGMiner will raise the clock speed. If at any time you manually set an even higher clock speed successfully in BFGMiner, it will record this value and use it as its new upper limit (and the same for low clock speeds and lower limits). If the temperature goes over the cutoff limit (95 degrees by default), BFGMiner will completely disable the GPU from mining and it will not be re-enabled unless manually done so. 
The cutoff temperature can be changed with:
--temp-cutoff
e.g.
--temp-cutoff 95,105
Sets card 0 cutoff temperature to 95 and card 1 to 105.

--gpu-memdiff -125
This setting will modify the memory speed whenever the GPU clock speed is
modified by --auto-gpu. In this example, it will set the memory speed to be
125 MHz lower than the GPU speed. This is useful for some cards like the 6970
which normally don't allow a bigger clock speed difference. The 6970 is known
to only allow -125, while the 7970 only allows -150.

CHANGING SETTINGS:
When setting values, it is important to realise that even though the driver
may report the value was changed successfully, and the new card power profile
information contains the values you set it to, the card itself may refuse to
use those settings. As the performance profile changes dynamically, querying
the "current" value on the card can be wrong as well. So when changing values
in BFGMiner, after a pause of 1 second, it will report to you the current
values where you should check that your change has taken. An example is that
6970 reference cards will accept low memory values but refuse to actually run
those lower memory values unless they're within 125 of the engine clock speed.
In that scenario, they usually set their real speed back to their default.

BFGMiner reports the so-called "safe" range of whatever it is you are
modifying when you ask to modify it on the fly. However, you can change
settings to values outside this range. Despite this, the card can easily
refuse to accept your changes, or worse, accept your changes and then silently
ignore them. So there is absolutely no way to know how far to/from where/to it
can set things safely or otherwise, and there is nothing stopping you from at
least trying to set them outside this range. Being very conscious of these
possible failures is why BFGMiner will report back the current values for you
to examine how exactly the card has responded. Even within the range of values
the card reports it will accept, it is very easy to crash just about any card,
so BFGMiner cannot use those values to determine what range to set. You have
to provide something meaningful manually for BFGMiner to work with through
experimentation.

STARTUP / SHUTDOWN:
When BFGMiner starts up, it tries to read off the current profile information
for clock and fan speeds and stores these values. When quitting BFGMiner, it
will then try to restore the original values. Changes made to settings outside
of BFGMiner while it's running may therefore be reset to the startup BFGMiner
values when BFGMiner shuts down.

---

GPU DEVICE ISSUES and use of --gpu-map

GPUs mine with OpenCL software via the GPU device driver. This means you need
to have both an OpenCL SDK installed, and the GPU device driver RUNNING (i.e.
Xorg up and running configured for all devices that will mine on linux etc.)
Meanwhile, the hardware monitoring that BFGMiner offers for AMD devices relies
on the ATI Display Library (ADL) software to work.

OpenCL DOES NOT TALK TO THE ADL.

There is no 100% reliable way to know that OpenCL devices are identical to the
ADL devices, as neither give off the same information. BFGMiner does its best
to correlate these devices based on the order that OpenCL and ADL number them.
It is possible that this will fail for the following reasons:
1. The device order is listed differently by OpenCL and ADL (rare), even if
the number of devices is the same.
2. There are more OpenCL devices than ADL. 
OpenCL stupidly sees one GPU as two devices if you have two monitors connected to the one GPU. 3. There are more ADL devices than OpenCL. ADL devices include any ATI GPUs, including ones that can't mine, like some older R4xxx cards. To cope with this, the ADVANCED option for --gpu-map is provided with BFGMiner. DO NOT USE THIS UNLESS YOU KNOW WHAT YOU ARE DOING. The default will work the vast majority of the time unless you know you have a problem already. To get useful information, start BFGMiner with just the -n option. You will get output that looks like this: [2012-04-25 13:17:34] CL Platform 0 vendor: Advanced Micro Devices, Inc. [2012-04-25 13:17:34] CL Platform 0 name: AMD Accelerated Parallel Processing [2012-04-25 13:17:34] CL Platform 0 version: OpenCL 1.1 AMD-APP (844.4) [2012-04-25 13:17:34] Platform 0 devices: 3 [2012-04-25 13:17:34] 0 Tahiti [2012-04-25 13:17:34] 1 Tahiti [2012-04-25 13:17:34] 2 Cayman [2012-04-25 13:17:34] GPU 0 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 1 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 2 AMD Radeon HD 6900 Series hardware monitoring enabled [2012-04-25 13:17:34] 3 GPU devices max detected Note the number of devices here match, and the order is the same. If devices 1 and 2 were different between Tahiti and Cayman, you could run BFGMiner with: --gpu-map 2:1,1:2 And it would swap the monitoring it received from ADL device 1 and put it to OpenCL device 2 and vice versa. If you have 2 monitors connected to the first device it would look like this: [2012-04-25 13:17:34] Platform 0 devices: 4 [2012-04-25 13:17:34] 0 Tahiti [2012-04-25 13:17:34] 1 Tahiti [2012-04-25 13:17:34] 2 Tahiti [2012-04-25 13:17:34] 3 Cayman [2012-04-25 13:17:34] GPU 0 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 1 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 2 AMD Radeon HD 6900 Series hardware monitoring enabled To work around this, you would use: -d 0 -d 2 -d 3 --gpu-map 2:1,3:2 If you have an older card as well as the rest it would look like this: [2012-04-25 13:17:34] Platform 0 devices: 3 [2012-04-25 13:17:34] 0 Tahiti [2012-04-25 13:17:34] 1 Tahiti [2012-04-25 13:17:34] 2 Cayman [2012-04-25 13:17:34] GPU 0 AMD Radeon HD 4500 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 1 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 2 AMD Radeon HD 7900 Series hardware monitoring enabled [2012-04-25 13:17:34] GPU 3 AMD Radeon HD 6900 Series hardware monitoring enabled To work around this you would use: --gpu-map 0:1,1:2,2:3 --- GPU FAQ: Q: Can I change the intensity settings individually for each GPU? A: Yes, pass a list separated by commas such as -I d,4,9,9 Q: The CPU usage is high. A: The ATI drivers after 11.6 have a bug that makes them consume 100% of one CPU core unnecessarily, so downgrade to 11.6. Binding BFGMiner to one CPU core on windows can minimise it to 100% (instead of more than one core). Driver version 11.11 on linux and 11.12 on windows appear to have fixed this issue. Note that later drivers may have an apparent return of high CPU usage. Try 'export GPU_USE_SYNC_OBJECTS=1' on Linux before starting BFGMiner. You can also set this variable in windows via a batch file or on the command line before starting BFGMiner with 'setx GPU_USE_SYNC_OBJECTS 1' Q: My GPU hangs and I have to reboot it to get it going again? 
A: The more aggressively the mining software uses your GPU, the less overclock you will be able to run. You are more likely to hit your limits with BFGMiner and you will find you may need to overclock your GPU less aggressively. The software cannot be responsible and make your GPU hang directly. If you simply cannot get it to ever stop hanging, try decreasing the intensity, and if even that fails, try changing to the poclbm kernel with -k poclbm, though you will sacrifice performance. BFGMiner is designed to try and safely restart GPUs as much as possible, but NOT if that restart might actually crash the rest of the GPUs mining, or even the machine. It tries to restart them with a separate thread and if that separate thread dies, it gives up trying to restart any more GPUs. Q: Can you change the autofan/autogpu to change speeds in a different manner? A: The defaults are sane and safe. I'm not interested in changing them further. The starting fan speed is set to 50% in auto-fan mode as a safety precaution. Q: I upgraded BFGMiner version and my hashrate suddenly dropped! A: No, you upgraded your SDK version unwittingly between upgrades of BFGMiner and that caused your hashrate to drop. Please see the next question. Q: I upgraded my ATI driver/SDK/BFGMiner and my hashrate suddenly dropped! A: The hashrate performance in BFGMiner is tied to the version of the ATI SDK that is installed only for the very first time BFGMiner is run. This generates binaries that are used by the GPU every time after that. Any upgrades to the SDK after that time will have no effect on the binaries. However, if you install a fresh version of BFGMiner, and have since upgraded your SDK, new binaries will be built. It is known that the 2.6 ATI SDK has a huge hashrate penalty on generating new binaries. It is recommended to not use this SDK at this time unless you are using an ATI 7xxx card that needs it. Q: Which AMD SDK is the best for BFGMiner? A: At the moment, versions 2.4 and 2.5 work the best for 5xxx and 6xxx GPUs. SDK 2.6 or 2.7 works best for 7xxx. SDK 2.8 is known to have many problems. If you need to use the 2.6+ SDK (7xxx and later), the phatk kernel will perform poorly, while the diablo or (modified) poclbm kernel are optimised for it. Q: Which AMD driver is the best? A: Unfortunately AMD has a history of having quite a few releases with issues when it comes to mining, either in terms of breaking mining, increasing CPU usage or very low hashrates. Only experimentation can tell you for sure, but some good releases were 11.6, 11.12, 12.4 and 12.8. Note that older cards may not work with the newer drivers. Q: I have multiple SDKs installed, can I choose which one it uses? A: Run bfgminer with the -n option and it will list all the platforms currently installed. Then you can tell BFGMiner which platform to use with --gpu-platform. Q: BFGMiner reports no devices or only one device on startup on Linux although I have multiple devices and drivers+SDK installed properly? A: Try "export DISPLAY=:0" before running BFGMiner. Q: BFGMiner crashes immediately on startup. A: One of the common reasons for this is that you have mixed files on your machine for the driver or SDK. Windows has a nasty history of not cleanly uninstalling files so you may have to use third party tools like driversweeper to remove old versions. The other common reason for this is windows antivirus software is disabling one of the DLLs from working. 
If BFGMiner starts with the -T option but never starts without it, this is a sure fire sign you have this problem and will have to disable your antivirus or set up some exceptions in it if possible. Q: Is it faster to mine on Windows or Linux? A: It makes no difference. It comes down to choice of operating system for their various features. Linux offers much better long term stability and remote monitoring and security, while Windows offers you overclocking tools that can achieve much more than BFGMiner can do on Linux. Q: BFGMiner cannot see any of my GPUs even though I have configured them all to be enabled and installed OpenCL (+/- Xorg is running and the DISPLAY variable is exported on Linux)? A: Check the output of 'bfgminer -S opencl:auto -d?', it will list what OpenCL devices your installed SDK recognises. If it lists none, you have a problem with your version or installation of the SDK. Q: BFGMiner is mining on the wrong GPU, I want it on the AMD but it's mining on my on board GPU? A: Make sure the AMD OpenCL SDK is installed, check the output of 'bfgminer -S opencl:auto -d?' and use the appropriate parameter with --gpu-platform. Q: I'm getting much lower hashrates than I should be for my GPU? A: Look at your driver/SDK combination and disable power saving options for your GPU. Specifically look to disable ULPS. Make sure not to set intensity above 11 for Bitcoin mining. Q: Can I mine with AMD while running Nvidia or Intel GPUs at the same time? A: If you can install both drivers successfully (easier on windows) then yes, using the --gpu-platform option. Q: Can I mine with Nvidia or Intel GPUs? A: Yes, but the hashrate on these is very poor and it is likely you'll be using much more energy than you'll be earning in coins. Q: Can I mine on both Nvidia and AMD GPUs at the same time? A: No, you must run one instance of BFGMiner with the --gpu-platform option for each. Q: Can I mine on Linux without running Xorg? A: With Nvidia you can, but with AMD you cannot. Q: I'm trying to mine a scrypt cryptocurrency but BFGMiner shows MH values instead of kH and submits no shares? A: Add the --scrypt parameter to your BFGMiner startup command. Q: I can't get anywhere near enough hashrate for scrypt compared to other people? A: You may not have enough system RAM, as this is also required. Q: My scrypt hashrate is high but the pool reports only a tiny proportion of my hashrate? A: You are generating garbage hashes due to your choice of settings. Try decreasing your intensity, do not increase the number of gpu-threads, and consider adding system RAM to match your GPU ram. You may also be using a bad combination of driver and/or SDK. Q: Scrypt fails to initialise the kernel every time? A: Your parameters are too high. Don't add GPU threads, don't set intensity too high, decrease thread concurrency. See the README.scrypt for a lot more help. Q: BFGMiner stops mining (or my GPUs go DEAD) and I can't close it? A: Once the driver has crashed, there is no way for BFGMiner to close cleanly. You will have to kill it, and depending on how corrupted your driver state has gotten, you may even need to reboot. Windows is known to reset drivers when they fail and BFGMiner will be stuck trying to use the old driver instance. Q: I can't get any monitoring of temperatures or fanspeed with BFGMiner when I start it remotely? A: With Linux, make sure to export the DISPLAY variable. On Windows, you cannot access these monitoring values via RDP. This should work with TightVNC or TeamViewer, though. 
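As a quick check when logged in remotely on Linux (a sketch only; it assumes
Xorg is running on display :0 and that BFGMiner was built with ADL support),
the following should list your GPUs with "hardware monitoring enabled" if
monitoring is going to work in that session:

export DISPLAY=:0
bfgminer -n
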
Q: I change my GPU engine/memory/voltage and BFGMiner reports back no change? A: BFGMiner asks the GPU using the ATI Display Library to change settings, but the driver and hardware are free to do what it wants with that query, including ignoring it. Some GPUs are locked with one or more of those properties as well. The most common of these is that many GPUs only allow a fixed difference between the engine clock speed and the memory clock speed (such as the memory being no lower than the engine - 150). Other 3rd party tools have unofficial data on these devices on windows and can get the memory clock speed down further but BFGMiner does not have access to these means. Q: I have multiple GPUs and although many devices show up, it appears to be working only on one GPU splitting it up. A: Your driver setup is failing to properly use the accessory GPUs. Your driver may be misconfigured or you have a driver version that needs a dummy plug on all the GPUs that aren't connected to a monitor. Q: I have some random GPU performance related problem not addressed above. A: Seriously, it's the driver and/or SDK. Uninstall them and start again, also noting there is no clean way to uninstall them so you will likely have to use extra tools or do it manually. Q: Do I need to recompile after updating my driver/SDK? A: No. The software is unchanged regardless of which driver/SDK/ADL version you are running. Q: I do not want BFGMiner to modify my engine/clock/fanspeed? A: BFGMiner only modifies values if you tell it to via the parameters. Otherwise it will just monitor the values. Q: Should I use crossfire/SLI? A: It does not benefit mining at all and depending on the GPU may actually worsen performance. bfgminer-bfgminer-3.10.0/README.OpenWrt000066400000000000000000000024661226556647300174600ustar00rootroot00000000000000Open up /etc/opkg.conf (on your router) in your favourite editor. You will see a line that looks similar to this at the top (depending on your device): src/gz attitude_adjustment http://downloads.openwrt.org/attitude_adjustment/12.09/ar71xx/generic/packages Note the platform following the OpenWrt version. In this example, it is ar71xx. Now add a new line immediately below it, similar to this: src/gz bfgminer http://luke.dashjr.org/programs/bitcoin/files/bfgminer/latest/openwrt/12.09/ar71xx Be sure you put the same platform at the end as your OpenWrt repository! Also note that you can change "latest" to "stable" or "testing" to get better-tested versions. Next, save the file and exit your editor. Tell opkg to reload its package lists by running the command: opkg update If you get a "404 Not Found" error, then your router may not be supported. If you open an issue for your router's platform, it may be possible to add support. 
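If you do get that error, it is also worth double-checking the line you added
for typos; a quick way to compare it against the stock feed line (a sketch,
assuming opkg.conf is in its usual location) is:

grep 'src/gz' /etc/opkg.conf

The OpenWrt version and platform in the line you added should match the ones
in the stock line.
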
If all went well updating your package list, you can now install BFGMiner and any drivers and/or bitstreams you might need: opkg install bfgminer opkg install kmod-usb-serial-ftdi opkg install kmod-usb-serial-cp210x opkg install kmod-usb-serial-pl2303 opkg find bitstream* opkg install bitstream-ztex-ufm1_15y1 opkg install bitstream-ztex-ufm1_15b1 opkg install bitstream-fpgaminer bfgminer-bfgminer-3.10.0/README.RPC000066400000000000000000001570171226556647300165110ustar00rootroot00000000000000 This README contains details about the BFGMiner RPC API It also includes some detailed information at the end, about using miner.php If you start BFGMiner with the "--api-listen" option, it will listen on a simple TCP/IP socket for single string API requests from the same machine running BFGMiner and reply with a string and then close the socket each time If you add the "--api-network" option, it will accept API requests from any network attached computer. You can only access the commands that reply with data in this mode. By default, you cannot access any privileged command that affects the miner - you will receive an access denied status message instead. See --api-allow below for more details. You can specify IP addresses/prefixes that are only allowed to access the API with the "--api-allow" option, e.g. --api-allow W:192.168.0.1,10.0.0/24 will allow 192.168.0.1 or any address matching 10.0.0.*, but nothing else. IP addresses are automatically padded with extra '.0's as needed Without a /prefix is the same as specifying /32. 0/0 means all IP addresses. The 'W:' on the front gives that address/subnet privileged access to commands that modify BFGMiner (thus all API commands). Without it those commands return an access denied status. See --api-groups below to define other groups like W: Privileged access is checked in the order the IP addresses were supplied to "--api-allow" The first match determines the privilege level. Using the "--api-allow" option overrides the "--api-network" option if they are both specified With "--api-allow", 127.0.0.1 is not by default given access unless specified If you start BFGMiner also with the "--api-mcast" option, it will listen for a multicast message and reply to it with a message containing it's API port number, but only if the IP address of the sender is allowed API access. More groups (like the privileged group W:) can be defined using the --api-groups command Valid groups are only the letters A-Z (except R & W are predefined) and are not case sensitive. The R: group is the same as not privileged access. The W: group is (as stated) privileged access (thus all API commands). To give an IP address/subnet access to a group you use the group letter in front of the IP address instead of W: e.g. P:192.168.0/32 An IP address/subnet can only be a member of one group. A sample API group would be: --api-groups P:switchpool:enablepool:addpool:disablepool:removepool:poolpriority:* This would create a group 'P' that can do all current pool commands and all non-privileged commands - the '*' means all non-privileged commands. Without the '*' the group would only have access to the pool commands. Defining multiple groups example: --api-groups Q:quit:restart:*,S:save This would define 2 groups: Q: that can 'quit' and 'restart' as well as all non-privileged commands. S: that can only 'save' and no other commands. The RPC API request can be either simple text or JSON. 
If the request is JSON (starts with '{'), it will reply with a JSON formatted response, otherwise it replies with text formatted as described further below. The JSON request format required is '{"command":"CMD","parameter":"PARAM"}' (though of course parameter is not required for all requests) where "CMD" is from the "Request" column below and "PARAM" would be e.g. the CPU/GPU number if required. An example request in both formats to set GPU 0 fan to 80%: gpufan|0,80 {"command":"gpufan","parameter":"0,80"} The format of each reply (unless stated otherwise) is a STATUS section followed by an optional detail section. From API version 1.7 onwards, reply strings in JSON and Text have the necessary escaping as required to avoid ambiguity - they didn't before 1.7. For JSON the 2 characters '"' and '\' are escaped with a '\' before them. For Text the 4 characters '|' ',' '=' and '\' are escaped the same way. Only user entered information will contain characters that require being escaped, such as Pool URL, User and Password or the Config save filename, when they are returned in messages or as their values by the API. For API version 1.4 and later: The STATUS section is: STATUS=X,When=NNN,Code=N,Msg=string,Description=string| STATUS=X Where X is one of: W - Warning I - Informational S - Success E - Error F - Fatal (code bug) When=NNN Standard long time of request in seconds. Code=N Each unique reply has a unique Code (See api.c - #define MSG_NNNNNN). Msg=string Message matching the Code value N. Description=string This defaults to the BFGMiner version but is the value of --api-description if it was specified at runtime. For API version 1.10 and later: The list of requests - a (*) means it requires privileged access - and replies: Request Reply Section Details ------- ------------- ------- version VERSION CGMiner=BFGMiner version API=API version config CONFIG Some miner configuration information: GPU Count=N, <- the number of GPUs PGA Count=N, <- the number of PGAs CPU Count=N, <- the number of CPUs Pool Count=N, <- the number of Pools ADL=X, <- Y or N if ADL is compiled in the code ADL in use=X, <- Y or N if any GPU has ADL Strategy=Name, <- the current pool strategy Log Interval=N, <- log interval (--log N) Device Code=GPU ICA , <- spaced list of compiled device drivers OS=Linux/Apple/..., <- operating System Failover-Only=true/false, <- failover-only setting ScanTime=N, <- --scan-time setting Queue=N, <- --queue setting Expiry=N| <- --expiry setting summary SUMMARY The status summary of the miner e.g. Elapsed=NNN,Found Blocks=N,Getworks=N,...| pools POOLS The status of each pool e.g. Pool=0,URL=http://pool.com:6311,Status=Alive,...| devs DEVS Each available GPU, PGA and CPU with their status e.g. 
GPU=0,Accepted=NN,MHS av=NNN,...,Intensity=D| Last Share Time=NNN, <- standard long time in sec (or 0 if none) of last accepted share Last Share Pool=N, <- pool number (or -1 if none) Last Valid Work=NNN, <- standand long time in sec of last work returned that wasn't an HW: Will not report PGAs if PGA mining is disabled Will not report CPUs if CPU mining is disabled procs DEVS The details of each processor in the same format and details as for DEVS devscan|info DEVS Probes for a device specified by info, which is the same format as the --scan-serial command line option gpu|N GPU The details of a single GPU number N in the same format and details as for DEVS pga|N PGA The details of a single PGA number N in the same format and details as for DEVS This is only available if PGA mining is enabled Use 'pgacount' or 'config' first to see if there are any proc|N PGA The details of a single processor number N in the same format and details as for DEVS cpu|N CPU The details of a single CPU number N in the same format and details as for DEVS This is only available if CPU mining is enabled Use 'cpucount' or 'config' first to see if there are any gpucount GPUS Count=N| <- the number of GPUs pgacount PGAS Count=N| <- the number of PGAs Always returns 0 if PGA mining is disabled proccount PGAS Count=N| <- the number of processors cpucount CPUS Count=N| <- the number of CPUs Always returns 0 if CPU mining is disabled switchpool|N (*) none There is no reply section just the STATUS section stating the results of switching pool N to the highest priority (the pool is also enabled) The Msg includes the pool URL enablepool|N (*) none There is no reply section just the STATUS section stating the results of enabling pool N The Msg includes the pool URL addpool|URL,USR,PASS (*) none There is no reply section just the STATUS section stating the results of attempting to add pool N The Msg includes the pool URL Use '\\' to get a '\' and '\,' to include a comma inside URL, USR or PASS poolpriority|N,... (*) none There is no reply section just the STATUS section stating the results of changing pool priorities See usage below poolquota|N,Q (*) none There is no reply section just the STATUS section stating the results of changing pool quota to Q disablepool|N (*) none There is no reply section just the STATUS section stating the results of disabling pool N The Msg includes the pool URL removepool|N (*) none There is no reply section just the STATUS section stating the results of removing pool N The Msg includes the pool URL N.B. 
all details for the pool will be lost cpuenable|N (*) none There is no reply section just the STATUS section stating the results of the enable request cpudisable|N (*) none There is no reply section just the STATUS section stating the results of the disable request cpurestart|N (*) none There is no reply section just the STATUS section stating the results of the restart request gpuenable|N (*) none There is no reply section just the STATUS section stating the results of the enable request gpudisable|N (*) none There is no reply section just the STATUS section stating the results of the disable request gpurestart|N (*) none There is no reply section just the STATUS section stating the results of the restart request gpuintensity|N,I (*) none There is no reply section just the STATUS section stating the results of setting GPU N intensity to I gpumem|N,V (*) none There is no reply section just the STATUS section stating the results of setting GPU N memoryclock to V MHz gpuengine|N,V (*) none There is no reply section just the STATUS section stating the results of setting GPU N clock to V MHz gpufan|N,V (*) none There is no reply section just the STATUS section stating the results of setting GPU N fan speed to V% gpuvddc|N,V (*) none There is no reply section just the STATUS section stating the results of setting GPU N vddc to V save|filename (*) none There is no reply section just the STATUS section stating success or failure saving the BFGMiner config to filename The filename is optional and will use the BFGMiner default if not specified quit (*) none There is no status section but just a single "BYE" reply before BFGMiner quits notify NOTIFY The last status and history count of each devices problem e.g. NOTIFY=0,Name=PGA,ID=0,ProcID=0,Last Well=1332432290,...| privileged (*) none There is no reply section just the STATUS section stating an error if you do not have privileged access to the API and success if you do have privilege The command doesn't change anything in BFGMiner pgaenable|N (*) none There is no reply section just the STATUS section stating the results of the enable request You cannot enable a PGA if its status is not WELL This is only available if PGA mining is enabled pgadisable|N (*) none There is no reply section just the STATUS section stating the results of the disable request This is only available if PGA mining is enabled pgaidentify|N (*) none This is equivalent to PROCIDENTIFY on the first processor of any given device This is only available if PGA mining is enabled procenable|N (*) none There is no reply section just the STATUS section stating the results of the enable request procdisable|N (*) none There is no reply section just the STATUS section stating the results of the disable request procidentify|N (*) none There is no reply section just the STATUS section stating the results of the identify request On most supported devices, it will flash the led for approximately 4s All unsupported devices, it will return a warning status message stating that they don't support it For BFL, this adds a 4s delay to the share being processed so you may get a message stating that processing took longer than 7000ms if the request was sent towards the end of the timing of any work being worked on e.g.: BFL0: took 8438ms - longer than 7000ms You should ignore this devdetails DEVDETAILS Each device with a list of their static details This lists all devices including those not supported by the 'devs' command e.g. 
DEVDETAILS=0,Name=BFL,ID=0,ProcID=0,Driver=bitforce,...| restart (*) none There is no status section but just a single "RESTART" reply before BFGMiner restarts stats STATS Each device or pool that has 1 or more getworks with a list of stats regarding getwork times The values returned by stats may change in future versions thus would not normally be displayed Device drivers are also able to add stats to the end of the details returned check|cmd COMMAND Exists=Y/N, <- 'cmd' exists in this version Access=Y/N| <- you have access to use 'cmd' failover-only|true/false (*) none There is no reply section just the STATUS section stating what failover-only was set to coin COIN Coin mining information: Hash Method=sha256/scrypt, Current Block Time=N.N, <- 0 means none Current Block Hash=XXXX..., <- blank if none LP=true/false, <- LP is in use on at least 1 pool Network Difficulty=NN.NN| debug|setting (*) DEBUG Debug settings The optional commands for 'setting' are the same as the screen curses debug settings You can only specify one setting Only the first character is checked - case insensitive: Silent, Quiet, Verbose, Debug, RPCProto, PerDevice, WorkTime, Normal The output fields are (as above): Silent=true/false, Quiet=true/false, Verbose=true/false, Debug=true/false, RPCProto=true/false, PerDevice=true/false, WorkTime=true/false| setconfig|name,value (*) none There is no reply section just the STATUS section stating the results of setting 'name' The valid values for name are currently: queue, scantime, expiry (integer in the range 0 to 9999) coinbase-sig (string) pgaset|N,opt[,val] (*) none This is equivalent to PROCSET on the first processor of any given device This is only available if PGA mining is enabled procset|N,opt[,val] (*) none There is no reply section just the STATUS section stating the results of setting processor N with opt[,val] If the processor does not support any set options, it will always return a WARN stating pgaset isn't supported If opt=help it will return an INFO status with a help message about the options available The current options are: MMQ opt=clock val=2 to 250 (a multiple of 2) XBS opt=clock val=2 to 250 (a multiple of 2) zero|Which,true/false (*) none There is no reply section just the STATUS section stating that the zero, and optional summary, was done If Which='all', all normal BFGMiner and API statistics will be zeroed other than the numbers displayed by the stats command If Which='bestshare', only the 'Best Share' values are zeroed for each pool and the global 'Best Share' The true/false option determines if a full summary is shown on the BFGMiner display like is normally displayed on exit. When you enable, disable or restart a GPU or PGA, you will also get Thread messages in the BFGMiner status window. The 'poolpriority' command can be used to reset the priority order of multiple pools with a single command - 'switchpool' only sets a single pool to first priority. Each pool should be listed by id number in order of preference (first = most preferred). Any pools not listed will be prioritised after the ones that are listed, in the priority order they were originally If the priority change affects the miner's preference for mining, it may switch immediately. When you switch to a different pool to the current one (including by priority change), you will get a 'Switching to URL' message in the BFGMiner status windows. Obviously, the JSON format is simply just the names as given before the '=' with the values after the '='. 
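For quick experiments from a shell (a sketch, separate from the bundled
example programs described below; it assumes BFGMiner was started with
--api-listen on the default port 4028 and that a netcat 'nc' utility is
installed), you can send a request straight to the API socket:

echo -n 'summary' | nc 127.0.0.1 4028
echo -n '{"command":"summary"}' | nc 127.0.0.1 4028

The first form returns the text reply and the second the JSON reply; BFGMiner
closes the socket after each reply. Depending on your netcat variant you may
need an option such as '-q 1' so that it waits for the reply before exiting.
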
If you enable BFGMiner debug (--debug or using RPC), you will also get messages showing some details of the requests received and the replies. There are included 5 program examples for accessing the API: api-example.php - a PHP script to access the API. usage: php api-example.php command by default it sends a 'summary' request to the miner at 127.0.0.1:4028 If you specify a command it will send that request instead. You must modify the line "$socket = getsock('127.0.0.1', 4028);" at the beginning of "function request($cmd)" to change where it looks for BFGMiner. api-example.c - a 'C' program to access the API (with source code). usage: api-example [command [ip/host [port]]] again, as above, missing or blank parameters are replaced as if you entered: api-example summary 127.0.0.1 4028 miner.php - an example web page to access the API. This includes buttons and inputs to attempt access to the privileged commands. See the end of this README.RPC for details of how to tune the display and also to use the option to display a multi-rig summary. api-example.py - a Python script to access the API. usage: python api-example.py [--host HOST] [--port PORT] [command] [parameter] by default it sends a 'summary' request to the miner at 127.0.0.1:4028 If you specify a command it will send that request instead. ---------- Feature Changelog for external applications using the API: API V2.3 (BFGMiner v3.7.0) Modified API command: 'devdetails' - Add 'Processors', 'Manufacturer', 'Product', 'Serial', 'Target Temperature', 'Cutoff Temperature' 'procdetails' - Add 'Manufacturer', 'Product', 'Serial', 'Target Temperature', 'Cutoff Temperature' --------- API V2.2 (BFGMiner v3.6.0) Modified API command: 'pools' - add 'Works' --------- API V2.1 (BFGMiner v3.4.0) Added API command: 'poolquota' - Set pool quota for load-balance strategy. Modified API command: 'devs', 'gpu', 'pga', 'procs' and 'asc' - add 'Device Elapsed', 'Stale', 'Work Utility', 'Difficulty Stale' 'pools' - add 'Quota' 'summary' - add 'Diff1 Work', 'MHS %ds' (where %d is the log interval) --------- API V2.0 (BFGMiner v3.3.0) Removed API commands: 'devdetail' - Use newer 'devdetails' for same information. Modified API commands: 'devs' - display status of each full device only (not processors) 'pga' - lookup and display device by device (not processor) number 'pgacount' - count only full devices (not processors) 'pgaenable' - enable all processors for a numbered full device 'pgadisable' - disable all processors for a numbered full device 'pgaidentify' - choose first processor of numbered full device 'pgaset' - choose first processor of numbered full device Added API commands: 'procs' 'proc' 'proccount' 'procenable' 'procdisable' 'procidentify' 'procset' ---------- API V1.25.3 (BFGMiner v3.2.0) Modified API commands: 'devs', 'pga', 'gpu' - add 'Device Hardware%' and 'Device Rejected%' 'pools' - add 'Pool Rejected%' and 'Pool Stale%' 'setconfig' - add 'http-port' number 'summary' - add 'Device Hardware%', 'Device Rejected%', 'Pool Rejected%', 'Pool Stale%' Removed output limitation: All replies can now be longer than the previous limitation of 64k, and will only be truncated on a 50ms timeout sending. Basic support for cgminer-compatible multicast RPC detection added. 
---------- API V1.25.2 (BFGMiner v3.1.4) Modified API commands: 'pgaset' - added: XBS opt=clock val=2 to 250 (and a multiple of 2) ---------- API V1.25.1 (BFGMiner v3.1.2) Added API commands: 'devscan' ---------- API V1.25 (BFGMiner v3.0.1) Modified API commands: 'devs' 'gpu' and 'pga' - add 'Last Valid Work' ---------- API V1.24.1 (BFGMiner v3.0.0) Modified API commands: 'cpustatus' - add 'ProcID' 'gpustatus' - add 'ProcID' 'pgastatus' - add 'ProcID' 'devstatus' - add 'ProcID' 'notify' - add 'ProcID' 'devdetails' - add 'ProcID' 'devdetail' - add 'Name', 'ID', and 'ProcID' 'pools' - add 'Message' 'coin' - add 'Network Difficulty' Pretty much updated every method returning 'Name' and 'ID' to also return 'ProcID'. This is a number starting with 0 for 'a', 1 for 'b', etc. ---------- API V1.24 (BFGMiner v2.10.3) Added API commands: 'zero' Modified API commands: 'pools' - add 'Best Share' 'stats' - rename 'Bytes Sent' and 'Bytes Recv' to 'Net Bytes Sent' and 'Net Bytes Recv' ---------- API V1.23 (BFGMiner v2.10.1) Added API commands: 'pgaset' - with: MMQ opt=clock val=2 to 230 (and a multiple of 2) ---------- API V1.22 (not released) Enforced output limitation: all extra records beyond the output limit of the API (~64k) are ignored and chopped off at the record boundary before the limit is reached however, JSON brackets will be correctly closed and the JSON id will be set to 0 (instead of 1) if any data was truncated. Modified API commands: 'stats' - add 'Times Sent', 'Bytes Sent', 'Times Recv', 'Bytes Recv' ---------- API V1.21 (BFGMiner v2.10.0) Modified API commands: 'summary' - add 'Best Share' ---------- API V1.20b (BFGMiner v2.9.1) Support for the X6500 FPGA was added. ---------- API V1.20 (BFGMiner v2.9.0) Modified API commands: 'pools' - add 'Has Stratum', 'Stratum Active', 'Stratum URL' ---------- API V1.19b (BFGMiner v2.8.1) Added API commands: 'pgaidentify|N' (only works for BitForce Singles so far) Modified API commands: Change pool field name back from 'Diff1 Work' to 'Diff1 Shares' 'devs' - add 'Difficulty Accepted', 'Difficulty Rejected', 'Last Share Difficulty' to all devices 'gpu|N' - add 'Difficulty Accepted', 'Difficulty Rejected', 'Last Share Difficulty' 'pga|N' - add 'Difficulty Accepted', 'Difficulty Rejected', 'Last Share Difficulty' 'notify' - add '*Dev Throttle' (for BitForce Singles) 'pools' - add 'Difficulty Accepted', 'Difficulty Rejected', 'Difficulty Stale', 'Last Share Difficulty' 'stats' - add 'Work Diff', 'Min Diff', 'Max Diff', 'Min Diff Count', 'Max Diff Count' to the pool stats 'setconfig|name,value' - add 'Coinbase-Sig' string ---------- API V1.19 (BFGMiner v2.8.0) Added API commands: 'debug' 'setconfig|name,N' Modified API commands: Change pool field name 'Diff1 Shares' to 'Diff1 Work' 'devs' - add 'Diff1 Work' to all devices 'gpu|N' - add 'Diff1 Work' 'pga|N' - add 'Diff1 Work' 'pools' - add 'Proxy' 'config' - add 'Queue', 'Expiry' ---------- API V1.18 (BFGMiner v2.7.4) Modified API commands: 'stats' - add 'Work Had Roll Time', 'Work Can Roll', 'Work Had Expire', and 'Work Roll Time' to the pool stats 'config' - include 'ScanTime' ---------- API V1.17b (BFGMiner v2.7.1) Modified API commands: 'summary' - add 'Work Utility' 'pools' - add 'Diff1 Shares' ---------- API V1.17 (BFGMiner v2.6.5) Added API commands: 'coin' ---------- API V1.16 (BFGMiner v2.6.5) Added API commands: 'failover-only' Modified API commands: 'config' - include failover-only state ---------- API V1.15 (BFGMiner v2.5.2) Added API commands: 'poolpriority' ---------- API V1.14 (BFGMiner 
v2.5.0) Modified API commands: 'stats' - more Icarus timing stats added 'notify' - include new device comms error counter The internal code for handling data was rewritten (~25% of the code) Completely backward compatible ---------- API V1.13 (BFGMiner v2.4.4) Added API commands: 'check' Support was added to BFGMiner for API access groups with the --api-groups option It's 100% backward compatible with previous --api-access commands ---------- API V1.12 (BFGMiner v2.4.3) Modified API commands: 'stats' - more pool stats added Support for the ModMiner FPGA was added ---------- API V1.11 (BFGMiner v2.4.2) Modified API commands: 'save' no longer requires a filename (use default if not specified) 'save' incorrectly returned status E (error) on success before. It now correctly returns S (success) ---------- API V1.10 (BFGMiner v2.4.1) Added API commands: 'stats' N.B. the 'stats' command can change at any time so any specific content present should not be relied upon. The data content is mainly used for debugging purposes or hidden options in BFGMiner and can change as development work requires. Modified API commands: 'pools' added "Last Share Time" ---------- API V1.9 (BFGMiner v2.4.0) Added API commands: 'restart' Modified API commands: 'notify' corrected invalid JSON ---------- API V1.8 (BFGMiner v2.3.5) Added API commands: 'devdetails' Support for the ZTEX FPGA was added. ---------- API V1.8-pre (BFGMiner v2.3.4) Added API commands: 'devdetail' ---------- API V1.7 (BFGMiner v2.3.4) Added API commands: 'removepool' Modified API commands: 'pools' added "User" From API version 1.7 onwards, reply strings in JSON and Text have the necessary escaping as required to avoid ambiguity. For JSON the 2 characters '"' and '\' are escaped with a '\' before them. For Text the 4 characters '|' ',' '=' and '\' are escaped the same way. ---------- API V1.6 (cgminer v2.3.2) Added API commands: 'pga' 'pgaenable' 'pgadisable' 'pgacount' Modified API commands: 'devs' now includes Icarus and BitForce FPGA devices. 'notify' added "*" to the front of the name of all numeric error fields. 'config' correct "Log Interval" to use numeric (not text) type for JSON. Support for Icarus and BitForce FPGAs was added. ---------- API V1.5 was not released ---------- API V1.4 (Kano's interim release of cgminer v2.3.1) Added API commands: 'notify' Modified API commands: 'config' added "Device Code" and "OS" Added "When" to the STATUS reply section of all commands. ---------- API V1.3 (cgminer v2.3.1-2) Added API commands: 'addpool' Modified API commands: 'devs'/'gpu' added "Total MH" for each device 'summary' added "Total MH" ---------- API V1.2 (cgminer v2.3.0) Added API commands: 'enablepool' 'disablepool' 'privileged' Modified API commands: 'config' added "Log Interval" Starting with API V1.2, any attempt to access a command that requires privileged security, from an IP address that does not have privileged security, will return an "Access denied" Error Status. ---------- API V1.1 (cgminer v2.2.4) There were no changes to the API commands in cgminer v2.2.4, however support was added to cgminer for IP address restrictions with the --api-allow option. ---------- API V1.1 (cgminer v2.2.2) Prior to V1.1, devs/gpu incorrectly reported GPU0 Intensity for all GPUs. Modified API commands: 'devs'/'gpu' added "Last Share Pool" and "Last Share Time" for each device ---------- API V1.0 (cgminer v2.2.0) Remove default CPU support. 
Added API commands: 'config' 'gpucount' 'cpucount' 'switchpool' 'gpuintensity' 'gpumem' 'gpuengine' 'gpufan' 'gpuvddc' 'save' ---------- API V0.7 (cgminer v2.1.0) Initial release of the API in the main cgminer git Commands: 'version' 'devs' 'pools' 'summary' 'gpuenable' 'gpudisable' 'gpurestart' 'gpu' 'cpu' 'gpucount' 'cpucount' 'quit' ---------------------------------------- miner.php ========= miner.php is a PHP based interface to the BFGMiner RPC API (referred to simply as the API below). It can show rig details, summaries and input fields to allow you to change BFGMiner. You can also create custom summary pages with it It has two levels to the security: 1) BFGMiner can be configured to allow or disallow API access and access level security for miner.php 2) miner.php can be configured to allow or disallow privileged BFGMiner access, if BFGMiner is configured to allow privileged access for miner.php --------- To use miner.php requires a web server with PHP. Basics: On Xubuntu 11.04, to install Apache and PHP, the commands are: sudo apt-get install apache2 sudo apt-get install php5 sudo /etc/init.d/apache2 reload On Fedora 17: yum install httpd php systemctl restart httpd.service systemctl enable httpd.service --system On windows there are a few options. Try one of these (apparently the first one is easiest - thanks jborkl) http://www.easyphp.org/ http://www.apachefriends.org/en/xampp.html http://www.wampserver.com/en/ --------- The basic BFGMiner option to enable the API is: --api-listen or in your bfgminer.conf: "api-listen" : true, (without the ',' on the end if it is the last item.) If the web server is running on the BFGMiner computer, the above is the only change required to give miner.php basic access to the BFGMiner API. - If the web server runs on a different computer to BFGMiner, you will also need to tell BFGMiner to allow the web server to access BFGMiner's API and tell miner.php where BFGMiner is. Assuming a.b.c.d is the IP address of the web server, you would add the following to BFGMiner: --api-listen --api-allow a.b.c.d or in your bfgminer.conf: "api-listen" : true, "api-allow" : "a.b.c.d", to tell BFGMiner to give the web server read access to the API. You also need to tell miner.php where BFGMiner is. Assuming BFGMiner is at IP address e.f.g.h, then you would edit miner.php and change the line: $rigs = array('127.0.0.1:4028'); to $rigs = array('e.f.g.h:4028'); See --api-network or --api-allow for more access details and how to give write access. You can however, also tell miner.php to find your mining rigs automatically on the local subnet. Add the following to each BFGMiner: --api-mcast or in your bfgminer.conf: "api-mcast" : true, And in miner.php set $mcast = true; This will ignore the value of $rigs and overwrite it with the list of zero or more rigs found on the network in the timeout specified. A rig will not reply if the API settings would mean it would also ignore an API request from the web server running miner.php --------- Once you have a web server with PHP running: copy your miner.php to the main web folder On Xubuntu 11.04: /var/www/ On Fedora 17: /var/www/html/ On Windows: Please check your windows Web/PHP documentation. Assuming the IP address of the web server is a.b.c.d Then in your web browser go to: http://a.b.c.d/miner.php Done :) --------- The rest of this documentation deals with the more complex functions of miner.php, using myminer.php, creating custom summaries and displaying multiple BFGMiner rigs. 
---------
If you create a file called myminer.php in the same web folder where you put miner.php, miner.php will load it when it runs.
This is useful for putting any changes you need to make to miner.php instead of changing miner.php. Thus if you update/get a new miner.php, you won't lose the changes you have made if you put all your changes in myminer.php (and haven't changed miner.php at all).
A simple example myminer.php that defines 2 rigs (that I will keep referring to further below) is:
 <?php
 $rigs = array('192.168.0.100:4028:A', '192.168.0.102:4028:B');
 ?>
Changes in myminer.php supersede what is in miner.php. However, this is only valid for variables in miner.php before the 2 lines where myminer.php is included by miner.php:
 if (file_exists('myminer.php'))
  include_once('myminer.php');
Every variable in miner.php above those 2 lines can be changed by simply defining them in your myminer.php.
So although miner.php originally contains the line:
 $rigs = array('127.0.0.1:4028');
if you created the example myminer.php given above, it would actually change the value of $rigs that is used when miner.php is running. i.e. you don't have to remove or comment out the $rigs line in miner.php; it will be superseded by myminer.php.
---------
The example myminer.php above also shows how to define more than one rig to be shown by miner.php:
Each rig string is 2 or 3 values separated by colons ':'. They are simply an IP address or hostname, followed by the port number (usually 4028) and an optional Name string.
miner.php displays rig buttons that will show the details of a single rig when you click on it - the button shows either the rig number, or the 'Name' string if you provide it.
PHP arrays contain each string separated by a comma, but no comma after the last one.
So an example for 3 rigs would be:
 $rigs = array('192.168.0.100:4028:A', '192.168.0.102:4028:B', '192.168.0.110:4028:C');
Of course each of the rigs listed would also have to have the API running and be set to allow the web server to access the API - as covered earlier in this document.
---------
So basically, any variable explained below can be put in myminer.php if you want to set it to something different to its default value and did not want to change miner.php itself every time you update it.
Below is a list of the variables that can be changed and an explanation of each.
---------
Default:
 $dfmt = 'H:i:s j-M-Y \U\T\CP';
Define the date format used to print full length dates.
If you get the string 'UTCP' on the end of your dates shown, that means you are using an older version of PHP and you can instead use:
 $dfmt = 'H:i:s j-M-Y \U\T\CO';
The PHP documentation on the date format is here: http://us.php.net/manual/en/function.date.php
---------
Default:
 $title = 'Mine';
Web page title. If you know PHP you can of course use code to define it e.g.
 $title = 'My Rig at: '.date($dfmt);
Which would set the web page title to something like: My Rig at: 10:34:00 22-Aug-2012 UTC+10:00
---------
Default:
 $readonly = false;
Set $readonly to true to force miner.php to be readonly.
This means it won't allow you to change BFGMiner even if the RPC API options allow it to.
If you set $readonly to false then it will check BFGMiner 'privileged' and will show input fields and buttons on the single rig page, allowing you to change devices, pools and even quit or restart BFGMiner.
However, if the 'privileged' test fails, the code will set $readonly to true.
---------
Default:
 $userlist = null;
Define password checking and default access. null means there is no password checking.
$userlist is an array of 3 arrays, e.g.
$userlist = array('sys' => array('boss' => 'bpass'), 'usr' => array('user' => 'upass', 'pleb' => 'ppass'), 'def' => array('Pools')); 'sys' is an array of system users and passwords (full access). 'usr' is an array of user level users and passwords (readonly access). 'def' is an array of custompages that anyone not logged in can view. Any of the 3 can be null, meaning there are none of that item. All validated 'usr' users are given $readonly = true; access. All validated 'sys' users are given the $readonly access you defined. If 'def' has one or more values, and allowcustompages is true, then anyone without a password can see the list of custompage buttons given in 'def' and will see the first one when they go to the web page, with a login button at the top right. From the login page, if you login with no username or password, it will show the first 'def' custompage (if there are any). If you are logged in, it will show a logout button at the top right. --------- Default: $notify = true; Set $notify to false to NOT attempt to display the notify command table of data Set $notify to true to attempt to display the notify command on the single rig page. If your older version of BFGMiner returns an 'Invalid command' because it doesn't have notify - it just shows the error status table. --------- Default: $checklastshare = true; Set $checklastshare to true to do the following checks: If a device's last share is 12x expected ago then display as an error. If a device's last share is 8x expected ago then display as a warning. If either of the above is true, also display the whole line highlighted This assumes shares are 1 difficulty shares. Set $checklastshare to false to not do the above checks. 'expected' is calculated from the device Mh/s value. So for example, a device that hashes at 380Mh/s should (on average) find a share every 11.3s. If the last share was found more than 11.3 x 12 seconds (135.6s) ago, it is considered an error and highlighted. If the last share was found more than 11.3 x 8 seconds (90.4s) ago, it is considered a warning and highlighted. The default highlighting is very subtle, so change it if you want it to be more obvious. --------- Default: $poolinputs = false; Set $poolinputs to true to show the input fields for adding a pool and changing the pool priorities on a single rig page. However, if $readonly is true, it will not display them. --------- Default: $rigs = array('127.0.0.1:4028'); Set $rigs to an array of your BFGMiner rigs that are running format: 'IP:Port' or 'Host:Port' or 'Host:Port:Name'. If you only have one rig, it will just show the detail of that rig. If you have more than one rig it will show a summary of all the rigs with buttons to show the details of each rig - the button contents will be 'Name' rather than rig number, if you specify 'Name'. e.g. $rigs = array('127.0.0.1:4028','myrig.com:4028:Sugoi'); --------- Default: $mcast = false; Set $mcast to true to look for your rigs and ignore $rigs. --------- Default: $mcastexpect = 0; The minimum number of rigs expected to be found when $mcast is true. If fewer are found, an error will be included at the top of the page. --------- Default: $mcastaddr = '224.0.0.75'; API Multicast address all miners are listening on. --------- Default: $mcastport = 4028; API Multicast UDP port all miners are listening on. --------- Default: $mcastcode = 'FTW'; The code all miners expect in the Multicast message sent. The message sent is "cgm-code-listport". Don't use the '-' character if you change it. 
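---------
Putting the multicast options above together, here is a small sketch of a myminer.php fragment that finds rigs automatically instead of listing them in $rigs. The values shown are examples only (the defaults are as documented above, and $mcastexpect = 2 is just an illustration for a two-rig network); the remaining multicast tuning variables ($mcastlistport, $mcasttimeout and $mcastretries) are described below:

 $mcast = true;
 $mcastexpect = 2;
 $mcastaddr = '224.0.0.75';
 $mcastport = 4028;
 $mcastcode = 'FTW';

Remember that each rig also needs "api-mcast" : true (and API access allowed for the web server) before it will answer the multicast probe.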
--------- Default: $mcastlistport = 4027; UDP port number that is added to the broadcast message sent that specifies to the miners the port to reply on. --------- Default: $mcasttimeout = 1.5; Set $mcasttimeout to the number of seconds (floating point) to wait for replies to the Multicast message. N.B. the accuracy of the timing used to wait for the replies is ~0.1s so there's no point making it more than one decimal place. --------- Default: $mcastretries = 0; Set $mcastretries to the number of times to retry the multicast. If $mcastexpect is 0, this is simply the number of extra times that it will send the multicast request. N.B. BFGMiner doesn't listen for multicast requests for 1000ms after each one it hears. If $mcastexpect is > 0, it will stop looking for replies once it has found at least $mcastexpect rigs, but it only checks this rig limit each time it reaches the $mcasttimeout limit, thus it can find more than $mcastexpect rigs if more exist. It will send the multicast message up to $mcastretries extra times or until it has found at least $mcastexpect rigs. When using $mcastretries, it is however possible for it to sometimes ignore some rigs on the network if $mcastexpect is less than the number of rigs on the network and some rigs are too slow to reply. --------- Default: $allowgen = false; Set $allowgen to true to allow customsummarypages to use 'gen', false means ignore any 'gen' options. This is disabled by default due to the possible security risk of using it, please see the end of this document for an explanation. --------- Default: $rigipsecurity = true; Set $rigipsecurity to false to show the IP/Port of the rig in the socket error messages and also show the full socket message. --------- Default: $rigtotals = true; $forcerigtotals = false; Set $rigtotals to true to display totals on the single rig page, 'false' means no totals (and ignores $forcerigtotals). If $rigtotals is true, all data is also right aligned. With false, it's as before, left aligned. This option is just here to allow people to set it to false if they prefer the old non-total display when viewing a single rig. Also, if there is only one line shown in any section, then no total will be shown (to save screen space). You can force it to always show rig totals on the single rig page, even if there is only one line, by setting $forcerigtotals = true; --------- Default: $socksndtimeoutsec = 10; $sockrcvtimeoutsec = 40; The numbers are integer seconds. The defaults should be OK for most cases. However, the longer SND is, the longer you have to wait while PHP hangs if the target BFGMiner isn't running or listening. RCV should only ever be relevant if BFGMiner has hung but the API thread is still running, RCV would normally be >= SND. Feel free to increase SND if your network is very slow or decrease RCV if that happens often to you. Also, on some windows PHP, apparently the $usec is ignored (so usec can't be specified). --------- Default: $hidefields = array(); List of fields NOT to be displayed. You can use this to hide data you don't want to see or don't want shown on a public web page. The list of sections are: SUMMARY, POOL, PGA, GPU, NOTIFY, CONFIG, DEVDETAILS, DEVS See the web page for the list of field names (the table headers). 
It is an array of 'SECTION.Field Name' => 1 This example would hide the slightly more sensitive pool information: Pool URL and pool username: $hidefields = array('POOL.URL' => 1, 'POOL.User' => 1); If you just want to hide the pool username: $hidefields = array('POOL.User' => 1); --------- Default: $ignorerefresh = false; $changerefresh = true; $autorefresh = 0; Auto-refresh of the page (in seconds) - integers only. $ignorerefresh = true/false always ignore refresh parameters. $changerefresh = true/false show buttons to change the value. $autorefresh = default value, 0 means don't auto-refresh. --------- Default: $placebuttons = 'top'; Where to place the Refresh, Summary, Custom Pages, Quit, etc. buttons. Valid values are: 'top' 'bot' 'both' Anything else means don't show them. (case sensitive) --------- Default: $miner_font_family = 'verdana,arial,sans'; $miner_font_size = '13pt'; Change these to set the font and font size used on the web page. --------- Default: $colouroverride = array(); Use this to change the web page colour scheme. See $colourtable in miner.php for the list of possible names to change. Simply put in $colouroverride, just the colours you wish to change. e.g. to change the colour of the header font and background you could do the following: $colouroverride = array( 'td.h color' => 'green', 'td.h background' => 'blue' ); --------- Default: $allowcustompages = true; Should we allow custom pages? (or just completely ignore them and don't display the buttons.) --------- OK this part is more complex: Custom Summary Pages. A custom summary page in an array of 'section' => array('FieldA','FieldB'...) The section defines what data you want in the summary table and the Fields define what data you want shown from that section. Standard sections are: SUMMARY, POOL, PGA, GPU, NOTIFY, CONFIG, DEVDETAILS, DEVS, STATS, COIN Fields are the names as shown on the headers on the normal pages. Fields can be 'name=new name' to display 'name' with a different heading 'new name'. There are also now joined sections: SUMMARY+POOL, SUMMARY+DEVS, SUMMARY+CONFIG, DEVS+NOTIFY, DEVS+DEVDETAILS SUMMARY+COIN These sections are an SQL join of the two sections and the fields in them are named section.field where 'section.' is the section the field comes from See the example further down. Also note: - empty tables are not shown. - empty columns (e.g. an unknown field) are not shown. - missing field data shows as blank. - the field name '*' matches all fields except in joined sections (useful for STATS and COIN). There are 2 hard coded sections: DATE - displays a date table like at the start of 'Summary'. RIGS - displays a rig table like at the start of 'Summary'. Each custom summary requires a second array, that can be empty, listing fields to be totalled for each section. If there is no matching total data, no total will show. 
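---------
Before looking at the full Mobile example below, here is a deliberately small sketch of the structure just described, for illustration only. The variable names $tinypage and $tinysum are arbitrary; the fields are standard SUMMARY fields. It shows one data array, one (possibly empty) totals array, and the $customsummarypages entry that ties them together under a button name:

 $tinypage = array('DATE' => null, 'SUMMARY' => array('Elapsed', 'MHS av', 'Accepted', 'Rejected=Rej'));
 $tinysum = array('SUMMARY' => array('MHS av', 'Accepted', 'Rejected'));
 $customsummarypages = array('Tiny' => array($tinypage, $tinysum));

Note that the totals array uses the original field name 'Rejected', not the renamed 'Rej', exactly as explained for the Mobile example below.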
--------- Looking at the Mobile example: $mobilepage = array( 'DATE' => null, 'RIGS' => null, 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks', Accepted', 'Rejected=Rej', 'Utility'), 'DEVS+NOTIFY' => array('DEVS.Name=Name', 'DEVS.ID=ID', 'DEVS.ProcID=Proc', 'DEVS.Status=Status', 'DEVS.Temperature=Temp', 'DEVS.MHS av=MHS av', 'DEVS.Accepted=Accept', 'DEVS.Rejected=Rej', 'DEVS.Utility=Utility', 'NOTIFY.Last Not Well=Not Well'), 'POOL' => array('POOL', 'Status', 'Accepted', 'Rejected=Rej', 'Last Share Time')); $mobilesum = array( 'SUMMARY' => array('MHS av', 'Found Blocks', 'Accepted', 'Rejected', 'Utility'), 'DEVS+NOTIFY' => array('DEVS.MHS av', 'DEVS.Accepted', 'DEVS.Rejected', 'DEVS.Utility'), 'POOL' => array('Accepted', 'Rejected')); $customsummarypages = array('Mobile' => array($mobilepage, $mobilesum)); This will show 5 tables (according to $mobilepage). Each table will have the chosen details for all the rigs specified in $rigs DATE A single box with the web server's current date and time. RIGS A table of the rigs: description, time, versions etc. SUMMARY This will use the API 'summary' command and show the selected fields: Elapsed, MHS av, Found Blocks, Accepted, Rejected and Utility However, 'Rejected=Rej' means that the header displayed for the 'Rejected' field will be 'Rej', instead of 'Rejected' (to save space). Same for 'Found Blocks=Blks' - to save space. DEVS+NOTIFY This will list each of the devices on each rig and display the list of fields as shown. It will also include the 'Last Not Well' field from the 'notify' command so you know when the device was last not well. You will notice that you need to rename each field e.g. 'DEVS.Name=Name' since each field name in the join between DEVS and NOTIFY is actually section.fieldname, not just fieldname. The join code automatically adds 2 fields to each GPU device: 'Name', 'ID', and 'ProcID'. They don't exist in the API 'devs' output but we can correctly calculate them from the GPU device data. These two fields are used to join DEVS to NOTIFY: i.e. find the NOTIFY record that has the same Name/ID/ProcID as the DEVS record and join them. POOL This will use the API 'pools' command and show the selected fields: POOL, Status, Accepted, Rejected, Last Share Time Again, I renamed the 'Rejected' field using 'Rejected=Rej', to save space. $mobilesum lists the sections and fields that should have a total. You can't define them for 'DATE' or 'RIGS' since they are hard coded tables. The example given: SUMMARY Show a total at the bottom of the columns for: MHS av, Found Blocks, Accepted, Rejected, Utility Firstly note that you use the original name i.e. for 'Rejected=Rej' you use 'Rejected', not 'Rej' and not 'Rejected=Rej'. Secondly note that it simply adds up the fields. If you ask for a total of a string field you will get the numerical sum of the string data. DEVS+NOTIFY Simply note in this join example that you must use the original field names which are section.fieldname, not just fieldname. POOL Show a total at the bottom of the columns for: Accepted and Rejected Again remember to use the original field name 'Rejected'. --------- With BFGMiner 2.10.1 and later, miner.php includes an extension to the custom pages that allows you to apply SQL style commands to the data: where, group, and having BFGMiner 3.4.0 also includes another option 'gen'. 
As an example, miner.php includes a more complex custom page called 'Pools' which includes the extension:
 $poolsext = array(
  'POOL+STATS' => array(
   'where' => null,
   'group' => array('POOL.URL', 'POOL.Has Stratum', 'POOL.Stratum Active'),
   'calc' => array('STATS.Bytes Sent' => 'sum', 'STATS.Bytes Recv' => 'sum'),
   'gen' => array('AvShr', 'POOL.Difficulty Accepted/max(POOL.Accepted,1)'),
   'having' => array(array('STATS.Bytes Recv', '>', 0)))
 );
This allows you to group records together from one or more rigs.
In the example, you'll get each Pool (with the same URL+Stratum info) listed once for all rigs and a sum of each of the fields listed in 'calc'.
'where' and 'having' are an array of fields and restrictions to apply.
In the above example, it will only display the rows that contain the 'STATS.Bytes Recv' field with a value greater than zero. If the row doesn't have the field, it will always be included.
All restrictions must be true in order for the row to be included.
Any restriction that is invalid or unknown is true.
An empty array, or null, means there are no restrictions.
A restriction is formatted as: array('Field', 'restriction', 'value')
Field is the simple field name as normally displayed, or SECTION.Field if it is a joined section (as in this case 'POOL+STATS').
The list of restrictions is:
 'set' - true if the row contains the 'Field' ('value' is not required or used)
 '=', '<', '<=', '>', '>=' - a numerical comparison.
 'eq', 'lt', 'le', 'gt', 'ge' - a case insensitive string comparison.
You can have multiple restrictions on a 'Field' - but all must be true to include the row containing the 'Field'.
e.g. a number range between 0 and 10 would be:
 array('STATS.Bytes Recv', '>', 0), array('STATS.Bytes Recv', '<', 10)
The difference between 'where' and 'having' is that 'where' is applied to the data before grouping it and 'having' is applied to the data after grouping it - otherwise they work the same.
'group' lists the fields to group over and 'calc' lists the function to apply to other fields that are not part of 'group'.
You can only see fields listed in 'group' and 'calc'.
A 'calc' is formatted as: 'Field' => 'function'
The current list of operations available for 'calc' is:
 'sum', 'avg', 'min', 'max', 'lo', 'hi', 'count', 'any'
The first 4 are as expected - the numerical sum, average, minimum or maximum.
'lo' is the first string of the list, sorted ignoring case.
'hi' is the last string of the list, sorted ignoring case.
'count' is the number of rows in the section specified in the calc, e.g. ('DEVS.Name' => 'count') would be the number of DEVS selected in the 'where'. Of course any valid 'DEVS.Xyz' would give the same 'count' value.
'any' is effectively random: the field value in the 1st row of the grouped data.
An unrecognised 'function' uses 'any'.
A 'gen' allows you to generate new fields from any valid PHP function of any of the other fields.
e.g.
 'gen' => array('AvShr', 'POOL.Difficulty Accepted/max(POOL.Accepted,1)'),
will generate a new field called GEN.AvShr that is the function shown, which in this case is the average difficulty of each share submitted.
THERE IS A SECURITY RISK WITH HOW GEN WORKS!
It simply replaces all the variables with their values and then requests PHP to execute the formula - thus if a field value returned from a BFGMiner API request contained PHP code, it could be executed by your web server.
Of course BFGMiner doesn't do this, but if you do not control the BFGMiner that returns the data in the API calls, someone could modify BFGMiner to return a PHP string in a field you use in 'gen'. Thus use 'gen' at your own risk. If someone feels the urge to write a mathematical interpreter in PHP to get around this risk, feel free to write one and submit it to the API author for consideration. bfgminer-bfgminer-3.10.0/README.scrypt000066400000000000000000000221171226556647300174010ustar00rootroot00000000000000If you wish to donate to the author of scrypt support, Con Kolivas, for his past work (he no longer maintains this), please send your donations to: 15qSxP1SQcUX3o4nhkfdbgyoWEFMomJ4rZ --- Scrypt mining for GPU is completely different to sha256 used for bitcoin mining. It has very different requirements to bitcoin mining and is a lot more complicated to get working well. Note that it is a ram dependent workload, and requires you to have enough system ram as well as fast enough GPU ram. If you have less system ram than your GPU has, it may not be possible to mine at any reasonable rate. There are 5 main parameters to tuning scrypt, all of which are optional for further fine tuning. When you start scrypt mining with the --scrypt option, BFGMiner will fail IN RANDOM WAYS. They are all due to parameters being outside what the GPU can cope with. NOTE that if it does not fail at startup, the presence of hardware errors (HW) are a sure sign that you have set the parameters too high. DRIVERS AND OPENCL SDK The choice of driver version for your GPU is critical, as some are known to break scrypt mining entirely while others give poor hashrates. As for the OpenCL SDK installed, for AMD it must be version 2.6 or later. Step 1 on Linux: export GPU_MAX_ALLOC_PERCENT=100 If you do not do this, you may find it impossible to scrypt mine. You may find a value of 40 is enough and increasing this further has little effect. export GPU_USE_SYNC_OBJECTS=1 may help CPU usage a little as well. On windows the same commands can be passed via a batch file if the following lines are in the .bat before starting BFGMiner: setx GPU_MAX_ALLOC_PERCENT 100 setx GPU_USE_SYNC_OBJECTS 1 --intensity XX (-I XX) Just like in Bitcoin mining, scrypt mining takes an intensity, however the scale goes from 0 to 20 to mimic the "Aggression" used in mtrlt's reaper. The reason this is crucial is that too high an intensity can actually be disastrous with scrypt because it CAN run out of ram. High intensities start writing over the same ram and it is highly dependent on the GPU, but they can start actually DECREASING your hashrate, or even worse, start producing garbage with HW errors skyrocketing. Note that if you do NOT specify an intensity, BFGMiner uses dynamic mode which is designed to minimise the harm to a running desktop and performance WILL be poor. The lower limit to intensity with scrypt is usually 8 and BFGMiner will prevent it going too low. SUMMARY: Setting this for reasonable hashrates is mandatory. --shaders XXX is a new option where you tell BFGMiner how many shaders your GPU has. This helps BFGMiner try to choose some meaningful baseline parameters. Use this table below to determine how many shaders your GPU has, and note that there are some variants of these cards, and Nvidia shaders are much much lower and virtually pointless trying to mine on. If this is not set, BFGMiner will query the device for how much memory it supports and will try to set a value based on that instead. 
SUMMARY: This will get you started but fine tuning for optimal performance is required.

 GPU    Shaders
 7750   512
 7770   640
 7850   1024
 7870   1280
 7950   1792
 7970   2048
 6850   960
 6870   1120
 6950   1408
 6970   1536
 6990   (6970x2)
 6570   480
 6670   480
 6790   800
 6450   160
 5670   400
 5750   720
 5770   800
 5830   1120
 5850   1440
 5870   1600
 5970   (5870x2)

These are only used as a rough guide for BFGMiner, and it is rare that this is all you will need to set.

Optional parameters to tune: -g, --thread-concurrency, --lookup-gap

--thread-concurrency:
This tunes the optimal size of work that scrypt can do. It is internally tuned by BFGMiner to be the highest reasonable multiple of shaders that it can allocate on your GPU. Ideally it should be a multiple of your shader count. vliw5 architecture (R5XXX) would be best at 5x shaders, while VLIW4 (R6xxx and R7xxx) are best at 4x.
Setting thread concurrency overrides anything you put into --shaders and is ultimately a BETTER way to tune performance.
SUMMARY: Spend lots of time finding the highest value that your device likes and increases hashrate.

-g:
Once you have found the optimal shaders and intensity, you can start increasing the -g value till BFGMiner fails to start. This is really only of value if you want to run low intensities as you will be unable to run more than 1.
SUMMARY: Don't touch this.

--lookup-gap
This tunes a compromise between ram usage and performance. Performance peaks at a gap of 2, but increasing the gap can save you some GPU ram, but almost always at the cost of significant loss of hashrate.
Setting lookup gap overrides the default of 2, but BFGMiner will use the --shaders value to choose a thread-concurrency if you haven't chosen one.
SUMMARY: Don't touch this.

Related parameters:

--worksize XX (-w XX)
Has a minor effect, should be a multiple of 64 up to 256 maximum.
SUMMARY: Worth playing with once everything else has been tried but will probably do nothing.

--vectors XX (-v XX)
Vectors are NOT used by the scrypt mining kernel.
SUMMARY: Does nothing.

Overclocking for scrypt mining:
First of all, do not underclock your memory initially. Scrypt mining requires memory speed and on most, but not all, GPUs, lowering memory speed lowers mining performance.
Second, absolute engine clock speeds do NOT correlate with hashrate. The ratio of engine clock speed to memory matters, so if you set your memory to the default value, and then start overclocking as you are running it, you should find a sweet spot where the hashrate peaks and then it might actually drop if you increase the engine clock speed further.
Third, the combination of motherboard, CPU and system ram ALSO makes a difference, so values that work for a GPU on one system may not work for the same GPU on a different system.
A decent amount of system ram is actually required for scrypt mining, and 4GB is suggested.
Finally, the power consumption while mining at high engine clocks, very high memory clocks can be far in excess of what you might imagine.
For example, a 7970 running with the following settings:
 --thread-concurrency 22392 --gpu-engine 1135 --gpu-memclock 1890
was using 305W!

---

TUNING AN AMD RADEON 7970
Example tuning a 7970 for Scrypt mining:

On Linux run this command:
 export GPU_MAX_ALLOC_PERCENT=100
or on Windows this:
 setx GPU_MAX_ALLOC_PERCENT 100
in the same console/bash/dos prompt/bat file/whatever you want to call it, before running BFGMiner.

First, find the highest thread concurrency that you can start it at. They should all start at 8192 but some will go up to 3 times that.
Don't go too high on the intensity while testing and don't change gpu threads. If you cannot go above 8192, don't fret as you can still get a high hashrate. Delete any .bin files so you're starting from scratch and see what bins get generated. First try without any thread concurrency or even shaders, as BFGMiner will try to find an optimal value: bfgminer -I 13 If that starts mining, see what bin was generated, it is likely the largest meaningful TC you can set. Starting it on mine I get: scrypt130302Tahitiglg2tc22392w64l8.bin Note that tc22392 tells you what thread concurrency it was. It should start without TC parameters, but you never know. So if it doesn't, start with --thread-concurrency 8192 and add 2048 to it at a time till you find the highest value it will start successfully at. If you wish to get a little extra from your hardware, you may also try overclocking. Do note that this will damage your GPUs and void your warranty, so unless you are willing to take that risk, skip the --gpu-engine and --gpu-memclock sections! Then start overclocking the eyeballs off your memory, as 7970s are exquisitely sensitive to memory speed and amazingly overclockable but please make sure it keeps adequately cooled with --auto-fan! Do it while it's running from the GPU menu. Go up by 25 at a time every 30 seconds or so until your GPU crashes. Then reboot and start it 25 lower as a rough start. One example runs stable at 1900 memory without overvolting. Then once you find the maximum memory clock speed, you need to find the sweet spot engine clock speed that matches it. It's a fine line where one more MHz will make the hashrate drop by 20%. It's somewhere in the .57 - 0.6 ratio range. Start your engine clock speed at half your memory clock speed and then increase it by 5 at a time. The hashrate should climb a little each rise in engine speed and then suddenly drop above a certain value. Decrease it by 1 then until you find it climbs dramatically. If your engine clock speed cannot get that high without crashing the GPU, you will have to use a lower memclock. Then, and only then, bother trying to increase intensity further. My final settings were: --gpu-engine 1141 --gpu-memclock 1875 -I 20 for a hashrate of 745kH. Note I did not bother setting a thread concurrency. Once you have the magic endpoint, look at what tc was chosen by the bin file generated and then hard code that in next time (eg --thread-concurrency 22392) as slight changes in thread concurrency will happen every time if you don't specify one, and the tc to clock ratios are critical! Your numbers will be your numbers depending on your hardware combination and OS, so don't expect to get exactly the same results! bfgminer-bfgminer-3.10.0/adl.c000066400000000000000000001267301226556647300161100ustar00rootroot00000000000000/* * Copyright 2011-2013 Con Kolivas * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #ifdef HAVE_ADL #include #include #include #ifdef HAVE_CURSES // Must be before stdbool, since pdcurses typedefs bool :/ #include #endif #include #include "miner.h" #include "ADL/adl_sdk.h" #include "compat.h" #ifndef WIN32 #include #include #include #else /* WIN32 */ #include #include #endif #include "adl_functions.h" #ifndef __stdcall #define __stdcall #endif #ifndef HAVE_CURSES #define wlogprint(...) applog(LOG_WARNING, __VA_ARGS__) #endif bool adl_active; bool opt_reorder = false; static const int opt_targettemp = 75; const int opt_overheattemp = 85; static pthread_mutex_t adl_lock; struct gpu_adapters { int iAdapterIndex; int iBusNumber; int virtual_gpu; int id; }; // Memory allocation function static void * __stdcall ADL_Main_Memory_Alloc(int iSize) { void *lpBuffer = malloc(iSize); return lpBuffer; } // Optional Memory de-allocation function static void __stdcall ADL_Main_Memory_Free (void **lpBuffer) { if (*lpBuffer) { free (*lpBuffer); *lpBuffer = NULL; } } #ifndef WIN32 // equivalent functions in linux static void *GetProcAddress(void *pLibrary, const char *name) { return dlsym( pLibrary, name); } #endif static ADL_MAIN_CONTROL_CREATE ADL_Main_Control_Create; static ADL_MAIN_CONTROL_DESTROY ADL_Main_Control_Destroy; static ADL_ADAPTER_NUMBEROFADAPTERS_GET ADL_Adapter_NumberOfAdapters_Get; static ADL_ADAPTER_ADAPTERINFO_GET ADL_Adapter_AdapterInfo_Get; static ADL_ADAPTER_ID_GET ADL_Adapter_ID_Get; static ADL_OVERDRIVE5_TEMPERATURE_GET ADL_Overdrive5_Temperature_Get; static ADL_OVERDRIVE5_CURRENTACTIVITY_GET ADL_Overdrive5_CurrentActivity_Get; static ADL_OVERDRIVE5_ODPARAMETERS_GET ADL_Overdrive5_ODParameters_Get; static ADL_OVERDRIVE5_FANSPEEDINFO_GET ADL_Overdrive5_FanSpeedInfo_Get; static ADL_OVERDRIVE5_FANSPEED_GET ADL_Overdrive5_FanSpeed_Get; static ADL_OVERDRIVE5_FANSPEED_SET ADL_Overdrive5_FanSpeed_Set; static ADL_OVERDRIVE5_ODPERFORMANCELEVELS_GET ADL_Overdrive5_ODPerformanceLevels_Get; static ADL_OVERDRIVE5_ODPERFORMANCELEVELS_SET ADL_Overdrive5_ODPerformanceLevels_Set; static ADL_MAIN_CONTROL_REFRESH ADL_Main_Control_Refresh; static ADL_OVERDRIVE5_POWERCONTROL_GET ADL_Overdrive5_PowerControl_Get; static ADL_OVERDRIVE5_POWERCONTROL_SET ADL_Overdrive5_PowerControl_Set; static ADL_OVERDRIVE5_FANSPEEDTODEFAULT_SET ADL_Overdrive5_FanSpeedToDefault_Set; #ifndef WIN32 static void *hDLL; // Handle to .so library #else /* WIN32 */ HINSTANCE hDLL; // Handle to DLL #endif static int iNumberAdapters; static LPAdapterInfo lpInfo = NULL; int set_fanspeed(int gpu, int iFanSpeed); static float __gpu_temp(struct gpu_adl *ga); static inline void lock_adl(void) { mutex_lock(&adl_lock); } static inline void unlock_adl(void) { mutex_unlock(&adl_lock); } /* This looks for the twin GPU that has the fanspeed control of a non fanspeed * control GPU on dual GPU cards */ static bool fanspeed_twin(struct gpu_adl *ga, struct gpu_adl *other_ga) { if (!other_ga->has_fanspeed) return false; if (abs(ga->iBusNumber - other_ga->iBusNumber) != 1) return false; if (strcmp(ga->strAdapterName, other_ga->strAdapterName)) return false; return true; } static bool prepare_adl(void) { int result; #if defined(WIN32) || defined(__CYGWIN__) # ifdef __CYGWIN__ # define LoadLibrary(x) dlopen(x, RTLD_LAZY|RTLD_GLOBAL); # endif hDLL = LoadLibrary("atiadlxx.dll"); if (hDLL == NULL) // A 32 bit calling application on 64 bit OS will fail to LoadLIbrary. 
// Try to load the 32 bit library (atiadlxy.dll) instead hDLL = LoadLibrary("atiadlxy.dll"); #else hDLL = dlopen( "libatiadlxx.so", RTLD_LAZY|RTLD_GLOBAL); #endif if (hDLL == NULL) { applog(LOG_INFO, "Unable to load ati adl library"); return false; } ADL_Main_Control_Create = (ADL_MAIN_CONTROL_CREATE) GetProcAddress(hDLL,"ADL_Main_Control_Create"); ADL_Main_Control_Destroy = (ADL_MAIN_CONTROL_DESTROY) GetProcAddress(hDLL,"ADL_Main_Control_Destroy"); ADL_Adapter_NumberOfAdapters_Get = (ADL_ADAPTER_NUMBEROFADAPTERS_GET) GetProcAddress(hDLL,"ADL_Adapter_NumberOfAdapters_Get"); ADL_Adapter_AdapterInfo_Get = (ADL_ADAPTER_ADAPTERINFO_GET) GetProcAddress(hDLL,"ADL_Adapter_AdapterInfo_Get"); ADL_Adapter_ID_Get = (ADL_ADAPTER_ID_GET) GetProcAddress(hDLL,"ADL_Adapter_ID_Get"); ADL_Overdrive5_Temperature_Get = (ADL_OVERDRIVE5_TEMPERATURE_GET) GetProcAddress(hDLL,"ADL_Overdrive5_Temperature_Get"); ADL_Overdrive5_CurrentActivity_Get = (ADL_OVERDRIVE5_CURRENTACTIVITY_GET) GetProcAddress(hDLL, "ADL_Overdrive5_CurrentActivity_Get"); ADL_Overdrive5_ODParameters_Get = (ADL_OVERDRIVE5_ODPARAMETERS_GET) GetProcAddress(hDLL, "ADL_Overdrive5_ODParameters_Get"); ADL_Overdrive5_FanSpeedInfo_Get = (ADL_OVERDRIVE5_FANSPEEDINFO_GET) GetProcAddress(hDLL, "ADL_Overdrive5_FanSpeedInfo_Get"); ADL_Overdrive5_FanSpeed_Get = (ADL_OVERDRIVE5_FANSPEED_GET) GetProcAddress(hDLL, "ADL_Overdrive5_FanSpeed_Get"); ADL_Overdrive5_FanSpeed_Set = (ADL_OVERDRIVE5_FANSPEED_SET) GetProcAddress(hDLL, "ADL_Overdrive5_FanSpeed_Set"); ADL_Overdrive5_ODPerformanceLevels_Get = (ADL_OVERDRIVE5_ODPERFORMANCELEVELS_GET) GetProcAddress(hDLL, "ADL_Overdrive5_ODPerformanceLevels_Get"); ADL_Overdrive5_ODPerformanceLevels_Set = (ADL_OVERDRIVE5_ODPERFORMANCELEVELS_SET) GetProcAddress(hDLL, "ADL_Overdrive5_ODPerformanceLevels_Set"); ADL_Main_Control_Refresh = (ADL_MAIN_CONTROL_REFRESH) GetProcAddress(hDLL, "ADL_Main_Control_Refresh"); ADL_Overdrive5_PowerControl_Get = (ADL_OVERDRIVE5_POWERCONTROL_GET) GetProcAddress(hDLL, "ADL_Overdrive5_PowerControl_Get"); ADL_Overdrive5_PowerControl_Set = (ADL_OVERDRIVE5_POWERCONTROL_SET) GetProcAddress(hDLL, "ADL_Overdrive5_PowerControl_Set"); ADL_Overdrive5_FanSpeedToDefault_Set = (ADL_OVERDRIVE5_FANSPEEDTODEFAULT_SET) GetProcAddress(hDLL, "ADL_Overdrive5_FanSpeedToDefault_Set"); if (!ADL_Main_Control_Create || !ADL_Main_Control_Destroy || !ADL_Adapter_NumberOfAdapters_Get || !ADL_Adapter_AdapterInfo_Get || !ADL_Adapter_ID_Get || !ADL_Overdrive5_Temperature_Get || !ADL_Overdrive5_CurrentActivity_Get || !ADL_Overdrive5_ODParameters_Get || !ADL_Overdrive5_FanSpeedInfo_Get || !ADL_Overdrive5_FanSpeed_Get || !ADL_Overdrive5_FanSpeed_Set || !ADL_Overdrive5_ODPerformanceLevels_Get || !ADL_Overdrive5_ODPerformanceLevels_Set || !ADL_Main_Control_Refresh || !ADL_Overdrive5_PowerControl_Get || !ADL_Overdrive5_PowerControl_Set || !ADL_Overdrive5_FanSpeedToDefault_Set) { applog(LOG_WARNING, "ATI ADL's API is missing"); return false; } // Initialise ADL. The second parameter is 1, which means: // retrieve adapter information only for adapters that are physically present and enabled in the system result = ADL_Main_Control_Create (ADL_Main_Memory_Alloc, 1); if (result != ADL_OK) { applog(LOG_INFO, "ADL Initialisation Error! Error %d!", result); return false; } result = ADL_Main_Control_Refresh(); if (result != ADL_OK) { applog(LOG_INFO, "ADL Refresh Error! 
Error %d!", result); return false; } return true; } void init_adl(int nDevs) { int result, i, j, devices = 0, last_adapter = -1, gpu = 0, dummy = 0; struct gpu_adapters adapters[MAX_GPUDEVICES], vadapters[MAX_GPUDEVICES]; bool devs_match = true; if (unlikely(pthread_mutex_init(&adl_lock, NULL))) { applog(LOG_ERR, "Failed to init adl_lock in init_adl"); return; } if (!prepare_adl()) return; // Obtain the number of adapters for the system result = ADL_Adapter_NumberOfAdapters_Get (&iNumberAdapters); if (result != ADL_OK) { applog(LOG_INFO, "Cannot get the number of adapters! Error %d!", result); return ; } if (iNumberAdapters > 0) { lpInfo = malloc ( sizeof (AdapterInfo) * iNumberAdapters ); memset ( lpInfo,'\0', sizeof (AdapterInfo) * iNumberAdapters ); lpInfo->iSize = sizeof(lpInfo); // Get the AdapterInfo structure for all adapters in the system result = ADL_Adapter_AdapterInfo_Get (lpInfo, sizeof (AdapterInfo) * iNumberAdapters); if (result != ADL_OK) { applog(LOG_INFO, "ADL_Adapter_AdapterInfo_Get Error! Error %d", result); return ; } } else { applog(LOG_INFO, "No adapters found"); return; } /* Iterate over iNumberAdapters and find the lpAdapterID of real devices */ for (i = 0; i < iNumberAdapters; i++) { int iAdapterIndex; int lpAdapterID; iAdapterIndex = lpInfo[i].iAdapterIndex; /* Get unique identifier of the adapter, 0 means not AMD */ result = ADL_Adapter_ID_Get(iAdapterIndex, &lpAdapterID); if (result != ADL_OK) { applog(LOG_INFO, "Failed to ADL_Adapter_ID_Get. Error %d", result); if (result == -10) applog(LOG_INFO, "This error says the device is not enabled"); } else /* Each adapter may have multiple entries */ if (lpAdapterID == last_adapter) continue; else if (!lpAdapterID) applog(LOG_INFO, "Adapter returns ID 0 meaning not AMD. Card order might be confused"); else last_adapter = lpAdapterID; applog(LOG_DEBUG, "GPU %d " "iAdapterIndex %d " "strUDID %s " "iBusNumber %d " "iDeviceNumber %d " "iFunctionNumber %d " "iVendorID %d " "strAdapterName %s ", devices, iAdapterIndex, lpInfo[i].strUDID, lpInfo[i].iBusNumber, lpInfo[i].iDeviceNumber, lpInfo[i].iFunctionNumber, lpInfo[i].iVendorID, lpInfo[i].strAdapterName); adapters[devices].iAdapterIndex = iAdapterIndex; adapters[devices].iBusNumber = lpInfo[i].iBusNumber; adapters[devices].id = i; /* We found a truly new adapter instead of a logical * one. 
Now since there's no way of correlating the * opencl enumerated devices and the ADL enumerated * ones, we have to assume they're in the same order.*/ if (++devices > nDevs && devs_match) { applog(LOG_ERR, "ADL found more devices than opencl!"); applog(LOG_ERR, "There is possibly at least one GPU that doesn't support OpenCL"); applog(LOG_ERR, "Use the gpu map feature to reliably map OpenCL to ADL"); devs_match = false; } } if (devices < nDevs) { applog(LOG_ERR, "ADL found less devices than opencl!"); applog(LOG_ERR, "There is possibly more than one display attached to a GPU"); applog(LOG_ERR, "Use the gpu map feature to reliably map OpenCL to ADL"); devs_match = false; } for (i = 0; i < devices; i++) { vadapters[i].virtual_gpu = i; vadapters[i].id = adapters[i].id; } /* Apply manually provided OpenCL to ADL mapping, if any */ for (i = 0; i < nDevs; i++) { if (gpus[i].mapped) { vadapters[gpus[i].virtual_adl].virtual_gpu = i; applog(LOG_INFO, "Mapping OpenCL device %d to ADL device %d", i, gpus[i].virtual_adl); } else gpus[i].virtual_adl = i; } if (!devs_match) { applog(LOG_ERR, "WARNING: Number of OpenCL and ADL devices did not match!"); applog(LOG_ERR, "Hardware monitoring may NOT match up with devices!"); } else if (opt_reorder) { /* Windows has some kind of random ordering for bus number IDs and * ordering the GPUs according to ascending order fixes it. Linux * has usually sequential but decreasing order instead! */ for (i = 0; i < devices; i++) { int j, virtual_gpu; virtual_gpu = 0; for (j = 0; j < devices; j++) { if (i == j) continue; #ifdef WIN32 if (adapters[j].iBusNumber < adapters[i].iBusNumber) #else if (adapters[j].iBusNumber > adapters[i].iBusNumber) #endif virtual_gpu++; } if (virtual_gpu != i) { applog(LOG_INFO, "Mapping device %d to GPU %d according to Bus Number order", i, virtual_gpu); vadapters[virtual_gpu].virtual_gpu = i; vadapters[virtual_gpu].id = adapters[i].id; } } } if (devices > nDevs) devices = nDevs; for (gpu = 0; gpu < devices; gpu++) { struct gpu_adl *ga; int iAdapterIndex; int lpAdapterID; ADLODPerformanceLevels *lpOdPerformanceLevels; int lev, adlGpu; adlGpu = gpus[gpu].virtual_adl; i = vadapters[adlGpu].id; iAdapterIndex = lpInfo[i].iAdapterIndex; gpus[gpu].virtual_gpu = vadapters[adlGpu].virtual_gpu; /* Get unique identifier of the adapter, 0 means not AMD */ result = ADL_Adapter_ID_Get(iAdapterIndex, &lpAdapterID); if (result != ADL_OK) { applog(LOG_INFO, "Failed to ADL_Adapter_ID_Get. 
Error %d", result); lpAdapterID = -1; } if (gpus[gpu].deven == DEV_DISABLED) { gpus[gpu].gpu_engine = gpus[gpu].gpu_memclock = gpus[gpu].gpu_vddc = gpus[gpu].gpu_fan = gpus[gpu].gpu_powertune = 0; continue; } applog(LOG_INFO, "GPU %d %s hardware monitoring enabled", gpu, lpInfo[i].strAdapterName); if (gpus[gpu].name) free((void*)gpus[gpu].name); gpus[gpu].name = lpInfo[i].strAdapterName; gpus[gpu].has_adl = true; /* Flag adl as active if any card is successfully activated */ adl_active = true; /* From here on we know this device is a discrete device and * should support ADL */ ga = &gpus[gpu].adl; ga->gpu = gpu; ga->iAdapterIndex = iAdapterIndex; ga->lpAdapterID = lpAdapterID; strcpy(ga->strAdapterName, lpInfo[i].strAdapterName); ga->DefPerfLev = NULL; ga->twin = NULL; ga->lpOdParameters.iSize = sizeof(ADLODParameters); if (ADL_Overdrive5_ODParameters_Get(iAdapterIndex, &ga->lpOdParameters) != ADL_OK) applog(LOG_INFO, "Failed to ADL_Overdrive5_ODParameters_Get"); lev = ga->lpOdParameters.iNumberOfPerformanceLevels - 1; /* We're only interested in the top performance level */ lpOdPerformanceLevels = malloc(sizeof(ADLODPerformanceLevels) + (lev * sizeof(ADLODPerformanceLevel))); lpOdPerformanceLevels->iSize = sizeof(ADLODPerformanceLevels) + sizeof(ADLODPerformanceLevel) * lev; /* Get default performance levels first */ if (ADL_Overdrive5_ODPerformanceLevels_Get(iAdapterIndex, 1, lpOdPerformanceLevels) != ADL_OK) applog(LOG_INFO, "Failed to ADL_Overdrive5_ODPerformanceLevels_Get"); /* Set the limits we'd use based on default gpu speeds */ ga->maxspeed = ga->minspeed = lpOdPerformanceLevels->aLevels[lev].iEngineClock; ga->lpTemperature.iSize = sizeof(ADLTemperature); ga->lpFanSpeedInfo.iSize = sizeof(ADLFanSpeedInfo); ga->lpFanSpeedValue.iSize = ga->DefFanSpeedValue.iSize = sizeof(ADLFanSpeedValue); /* Now get the current performance levels for any existing overclock */ ADL_Overdrive5_ODPerformanceLevels_Get(iAdapterIndex, 0, lpOdPerformanceLevels); /* Save these values as the defaults in case we wish to reset to defaults */ ga->DefPerfLev = lpOdPerformanceLevels; if (gpus[gpu].gpu_engine) { int setengine = gpus[gpu].gpu_engine * 100; /* Lower profiles can't have a higher setting */ for (j = 0; j < lev; j++) { if (lpOdPerformanceLevels->aLevels[j].iEngineClock > setengine) lpOdPerformanceLevels->aLevels[j].iEngineClock = setengine; } lpOdPerformanceLevels->aLevels[lev].iEngineClock = setengine; applog(LOG_INFO, "Setting GPU %d engine clock to %d", gpu, gpus[gpu].gpu_engine); ADL_Overdrive5_ODPerformanceLevels_Set(iAdapterIndex, lpOdPerformanceLevels); ga->maxspeed = setengine; if (gpus[gpu].min_engine) ga->minspeed = gpus[gpu].min_engine * 100; ga->managed = true; if (gpus[gpu].gpu_memdiff) set_memoryclock(gpu, gpus[gpu].gpu_engine + gpus[gpu].gpu_memdiff); } if (gpus[gpu].gpu_memclock) { int setmem = gpus[gpu].gpu_memclock * 100; for (j = 0; j < lev; j++) { if (lpOdPerformanceLevels->aLevels[j].iMemoryClock > setmem) lpOdPerformanceLevels->aLevels[j].iMemoryClock = setmem; } lpOdPerformanceLevels->aLevels[lev].iMemoryClock = setmem; applog(LOG_INFO, "Setting GPU %d memory clock to %d", gpu, gpus[gpu].gpu_memclock); ADL_Overdrive5_ODPerformanceLevels_Set(iAdapterIndex, lpOdPerformanceLevels); ga->managed = true; } if (gpus[gpu].gpu_vddc) { int setv = gpus[gpu].gpu_vddc * 1000; for (j = 0; j < lev; j++) { if (lpOdPerformanceLevels->aLevels[j].iVddc > setv) lpOdPerformanceLevels->aLevels[j].iVddc = setv; } lpOdPerformanceLevels->aLevels[lev].iVddc = setv; applog(LOG_INFO, "Setting GPU %d 
voltage to %.3f", gpu, gpus[gpu].gpu_vddc); ADL_Overdrive5_ODPerformanceLevels_Set(iAdapterIndex, lpOdPerformanceLevels); ga->managed = true; } ADL_Overdrive5_ODPerformanceLevels_Get(iAdapterIndex, 0, lpOdPerformanceLevels); ga->iEngineClock = lpOdPerformanceLevels->aLevels[lev].iEngineClock; ga->iMemoryClock = lpOdPerformanceLevels->aLevels[lev].iMemoryClock; ga->iVddc = lpOdPerformanceLevels->aLevels[lev].iVddc; ga->iBusNumber = lpInfo[i].iBusNumber; if (ADL_Overdrive5_FanSpeedInfo_Get(iAdapterIndex, 0, &ga->lpFanSpeedInfo) != ADL_OK) applog(LOG_INFO, "Failed to ADL_Overdrive5_FanSpeedInfo_Get"); else ga->has_fanspeed = true; /* Save the fanspeed values as defaults in case we reset later */ ga->DefFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_RPM; ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->DefFanSpeedValue); if (gpus[gpu].gpu_fan) set_fanspeed(gpu, gpus[gpu].gpu_fan); else gpus[gpu].gpu_fan = 85; /* Set a nominal upper limit of 85% */ /* Not fatal if powercontrol get fails */ if (ADL_Overdrive5_PowerControl_Get(ga->iAdapterIndex, &ga->iPercentage, &dummy) != ADL_OK) applog(LOG_INFO, "Failed to ADL_Overdrive5_PowerControl_get"); if (gpus[gpu].gpu_powertune) { ADL_Overdrive5_PowerControl_Set(ga->iAdapterIndex, gpus[gpu].gpu_powertune); ADL_Overdrive5_PowerControl_Get(ga->iAdapterIndex, &ga->iPercentage, &dummy); ga->managed = true; } /* Set some default temperatures for autotune when enabled */ if (!gpus[gpu].targettemp) gpus[gpu].targettemp = opt_targettemp; if (!ga->overtemp) ga->overtemp = opt_overheattemp; if (!gpus[gpu].cutofftemp) gpus[gpu].cutofftemp = opt_cutofftemp; if (opt_autofan) { /* Set a safe starting default if we're automanaging fan speeds */ int nominal = 50; ga->autofan = true; /* Clamp fanspeed values to range provided */ if (nominal > gpus[gpu].gpu_fan) nominal = gpus[gpu].gpu_fan; if (nominal < gpus[gpu].min_fan) nominal = gpus[gpu].min_fan; set_fanspeed(gpu, nominal); } if (opt_autoengine) { ga->autoengine = true; ga->managed = true; } gpus[gpu].temp = ga->lasttemp = __gpu_temp(ga); } for (gpu = 0; gpu < devices; gpu++) { struct gpu_adl *ga = &gpus[gpu].adl; int j; for (j = 0; j < devices; j++) { struct gpu_adl *other_ga; if (j == gpu) continue; other_ga = &gpus[j].adl; /* Search for twin GPUs on a single card. They will be * separated by one bus id and one will have fanspeed * while the other won't. 
*/ if (!ga->has_fanspeed) { if (fanspeed_twin(ga, other_ga)) { applog(LOG_INFO, "Dual GPUs detected: %d and %d", ga->gpu, other_ga->gpu); ga->twin = other_ga; other_ga->twin = ga; } } } } } static float __gpu_temp(struct gpu_adl *ga) { if (ADL_Overdrive5_Temperature_Get(ga->iAdapterIndex, 0, &ga->lpTemperature) != ADL_OK) return -1; return (float)ga->lpTemperature.iTemperature / 1000; } float gpu_temp(int gpu) { struct gpu_adl *ga; float ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); ret = __gpu_temp(ga); unlock_adl(); gpus[gpu].temp = ret; return ret; } static inline int __gpu_engineclock(struct gpu_adl *ga) { return ga->lpActivity.iEngineClock / 100; } int gpu_engineclock(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); if (ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity) != ADL_OK) goto out; ret = __gpu_engineclock(ga); out: unlock_adl(); return ret; } static inline int __gpu_memclock(struct gpu_adl *ga) { return ga->lpActivity.iMemoryClock / 100; } int gpu_memclock(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); if (ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity) != ADL_OK) goto out; ret = __gpu_memclock(ga); out: unlock_adl(); return ret; } static inline float __gpu_vddc(struct gpu_adl *ga) { return (float)ga->lpActivity.iVddc / 1000; } float gpu_vddc(int gpu) { struct gpu_adl *ga; float ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); if (ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity) != ADL_OK) goto out; ret = __gpu_vddc(ga); out: unlock_adl(); return ret; } static inline int __gpu_activity(struct gpu_adl *ga) { if (!ga->lpOdParameters.iActivityReportingSupported) return -1; return ga->lpActivity.iActivityPercent; } int gpu_activity(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); ret = ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity); unlock_adl(); if (ret != ADL_OK) return ret; if (!ga->lpOdParameters.iActivityReportingSupported) return ret; return ga->lpActivity.iActivityPercent; } static inline int __gpu_fanspeed(struct gpu_adl *ga) { if (!ga->has_fanspeed && ga->twin) return __gpu_fanspeed(ga->twin); if (!(ga->lpFanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_RPM_READ)) return -1; ga->lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_RPM; if (ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue) != ADL_OK) return -1; return ga->lpFanSpeedValue.iFanSpeed; } int gpu_fanspeed(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); ret = __gpu_fanspeed(ga); unlock_adl(); return ret; } static int __gpu_fanpercent(struct gpu_adl *ga) { if (!ga->has_fanspeed && ga->twin) return __gpu_fanpercent(ga->twin); if (!(ga->lpFanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_READ )) return -1; ga->lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_PERCENT; if (ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue) != ADL_OK) return -1; return ga->lpFanSpeedValue.iFanSpeed; } int gpu_fanpercent(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); ret = __gpu_fanpercent(ga); unlock_adl(); 
return ret; } static inline int __gpu_powertune(struct gpu_adl *ga) { int dummy = 0; if (ADL_Overdrive5_PowerControl_Get(ga->iAdapterIndex, &ga->iPercentage, &dummy) != ADL_OK) return -1; return ga->iPercentage; } int gpu_powertune(int gpu) { struct gpu_adl *ga; int ret = -1; if (!gpus[gpu].has_adl || !adl_active) return ret; ga = &gpus[gpu].adl; lock_adl(); ret = __gpu_powertune(ga); unlock_adl(); return ret; } bool gpu_stats(int gpu, float *temp, int *engineclock, int *memclock, float *vddc, int *activity, int *fanspeed, int *fanpercent, int *powertune) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) return false; ga = &gpus[gpu].adl; lock_adl(); gpus[gpu].temp = *temp = __gpu_temp(ga); if (ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity) != ADL_OK) { *engineclock = 0; *memclock = 0; *vddc = 0; *activity = 0; } else { *engineclock = __gpu_engineclock(ga); *memclock = __gpu_memclock(ga); *vddc = __gpu_vddc(ga); *activity = __gpu_activity(ga); } *fanspeed = __gpu_fanspeed(ga); *fanpercent = __gpu_fanpercent(ga); *powertune = __gpu_powertune(ga); unlock_adl(); return true; } #ifdef HAVE_CURSES static void get_enginerange(int gpu, int *imin, int *imax) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Get enginerange not supported\n"); return; } ga = &gpus[gpu].adl; *imin = ga->lpOdParameters.sEngineClock.iMin / 100; *imax = ga->lpOdParameters.sEngineClock.iMax / 100; } #endif int set_engineclock(int gpu, int iEngineClock) { ADLODPerformanceLevels *lpOdPerformanceLevels; struct cgpu_info *cgpu; int i, lev, ret = 1; struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Set engineclock not supported\n"); return ret; } iEngineClock *= 100; ga = &gpus[gpu].adl; /* Keep track of intended engine clock in case the device changes * profile and drops while idle, not taking the new engine clock */ ga->lastengine = iEngineClock; lev = ga->lpOdParameters.iNumberOfPerformanceLevels - 1; lpOdPerformanceLevels = alloca(sizeof(ADLODPerformanceLevels) + (lev * sizeof(ADLODPerformanceLevel))); lpOdPerformanceLevels->iSize = sizeof(ADLODPerformanceLevels) + sizeof(ADLODPerformanceLevel) * lev; lock_adl(); if (ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels) != ADL_OK) goto out; for (i = 0; i < lev; i++) { if (lpOdPerformanceLevels->aLevels[i].iEngineClock > iEngineClock) lpOdPerformanceLevels->aLevels[i].iEngineClock = iEngineClock; } lpOdPerformanceLevels->aLevels[lev].iEngineClock = iEngineClock; ADL_Overdrive5_ODPerformanceLevels_Set(ga->iAdapterIndex, lpOdPerformanceLevels); ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels); if (lpOdPerformanceLevels->aLevels[lev].iEngineClock == iEngineClock) ret = 0; ga->iEngineClock = lpOdPerformanceLevels->aLevels[lev].iEngineClock; if (ga->iEngineClock > ga->maxspeed) ga->maxspeed = ga->iEngineClock; if (ga->iEngineClock < ga->minspeed) ga->minspeed = ga->iEngineClock; ga->iMemoryClock = lpOdPerformanceLevels->aLevels[lev].iMemoryClock; ga->iVddc = lpOdPerformanceLevels->aLevels[lev].iVddc; ga->managed = true; out: unlock_adl(); cgpu = &gpus[gpu]; if (cgpu->gpu_memdiff) set_memoryclock(gpu, iEngineClock / 100 + cgpu->gpu_memdiff); return ret; } #ifdef HAVE_CURSES static void get_memoryrange(int gpu, int *imin, int *imax) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Get memoryrange not supported\n"); return; } ga = &gpus[gpu].adl; *imin = ga->lpOdParameters.sMemoryClock.iMin / 100; *imax = 
ga->lpOdParameters.sMemoryClock.iMax / 100; } #endif int set_memoryclock(int gpu, int iMemoryClock) { ADLODPerformanceLevels *lpOdPerformanceLevels; int i, lev, ret = 1; struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Set memoryclock not supported\n"); return ret; } gpus[gpu].gpu_memclock = iMemoryClock; iMemoryClock *= 100; ga = &gpus[gpu].adl; lev = ga->lpOdParameters.iNumberOfPerformanceLevels - 1; lpOdPerformanceLevels = alloca(sizeof(ADLODPerformanceLevels) + (lev * sizeof(ADLODPerformanceLevel))); lpOdPerformanceLevels->iSize = sizeof(ADLODPerformanceLevels) + sizeof(ADLODPerformanceLevel) * lev; lock_adl(); if (ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels) != ADL_OK) goto out; lpOdPerformanceLevels->aLevels[lev].iMemoryClock = iMemoryClock; for (i = 0; i < lev; i++) { if (lpOdPerformanceLevels->aLevels[i].iMemoryClock > iMemoryClock) lpOdPerformanceLevels->aLevels[i].iMemoryClock = iMemoryClock; } ADL_Overdrive5_ODPerformanceLevels_Set(ga->iAdapterIndex, lpOdPerformanceLevels); ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels); if (lpOdPerformanceLevels->aLevels[lev].iMemoryClock == iMemoryClock) ret = 0; ga->iEngineClock = lpOdPerformanceLevels->aLevels[lev].iEngineClock; ga->iMemoryClock = lpOdPerformanceLevels->aLevels[lev].iMemoryClock; ga->iVddc = lpOdPerformanceLevels->aLevels[lev].iVddc; ga->managed = true; out: unlock_adl(); return ret; } #ifdef HAVE_CURSES static void get_vddcrange(int gpu, float *imin, float *imax) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Get vddcrange not supported\n"); return; } ga = &gpus[gpu].adl; *imin = (float)ga->lpOdParameters.sVddc.iMin / 1000; *imax = (float)ga->lpOdParameters.sVddc.iMax / 1000; } static float curses_float(const char *query) { float ret; char *cvar; cvar = curses_input(query); if (unlikely(!cvar)) return -1; ret = atof(cvar); free(cvar); return ret; } #endif int set_vddc(int gpu, float fVddc) { ADLODPerformanceLevels *lpOdPerformanceLevels; int i, iVddc, lev, ret = 1; struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Set vddc not supported\n"); return ret; } iVddc = 1000 * fVddc; ga = &gpus[gpu].adl; lev = ga->lpOdParameters.iNumberOfPerformanceLevels - 1; lpOdPerformanceLevels = alloca(sizeof(ADLODPerformanceLevels) + (lev * sizeof(ADLODPerformanceLevel))); lpOdPerformanceLevels->iSize = sizeof(ADLODPerformanceLevels) + sizeof(ADLODPerformanceLevel) * lev; lock_adl(); if (ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels) != ADL_OK) goto out; for (i = 0; i < lev; i++) { if (lpOdPerformanceLevels->aLevels[i].iVddc > iVddc) lpOdPerformanceLevels->aLevels[i].iVddc = iVddc; } lpOdPerformanceLevels->aLevels[lev].iVddc = iVddc; ADL_Overdrive5_ODPerformanceLevels_Set(ga->iAdapterIndex, lpOdPerformanceLevels); ADL_Overdrive5_ODPerformanceLevels_Get(ga->iAdapterIndex, 0, lpOdPerformanceLevels); if (lpOdPerformanceLevels->aLevels[lev].iVddc == iVddc) ret = 0; ga->iEngineClock = lpOdPerformanceLevels->aLevels[lev].iEngineClock; ga->iMemoryClock = lpOdPerformanceLevels->aLevels[lev].iMemoryClock; ga->iVddc = lpOdPerformanceLevels->aLevels[lev].iVddc; ga->managed = true; out: unlock_adl(); return ret; } static void get_fanrange(int gpu, int *imin, int *imax) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Get fanrange not supported\n"); return; } ga = &gpus[gpu].adl; *imin = ga->lpFanSpeedInfo.iMinPercent; *imax = 
ga->lpFanSpeedInfo.iMaxPercent; } int set_fanspeed(int gpu, int iFanSpeed) { struct gpu_adl *ga; int ret = 1; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Set fanspeed not supported\n"); return ret; } ga = &gpus[gpu].adl; if (!(ga->lpFanSpeedInfo.iFlags & (ADL_DL_FANCTRL_SUPPORTS_RPM_WRITE | ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE ))) { applog(LOG_DEBUG, "GPU %d doesn't support rpm or percent write", gpu); return ret; } /* Store what fanspeed we're actually aiming for for re-entrant changes * in case this device does not support fine setting changes */ ga->targetfan = iFanSpeed; lock_adl(); ga->lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_RPM; if (ADL_Overdrive5_FanSpeed_Get(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue) != ADL_OK) { applog(LOG_DEBUG, "GPU %d call to fanspeed get failed", gpu); } if (!(ga->lpFanSpeedValue.iFlags & ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED)) { /* If user defined is not already specified, set it first */ ga->lpFanSpeedValue.iFlags |= ADL_DL_FANCTRL_FLAG_USER_DEFINED_SPEED; ADL_Overdrive5_FanSpeed_Set(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue); } if (!(ga->lpFanSpeedInfo.iFlags & ADL_DL_FANCTRL_SUPPORTS_PERCENT_WRITE)) { /* Must convert speed to an RPM */ iFanSpeed = ga->lpFanSpeedInfo.iMaxRPM * iFanSpeed / 100; ga->lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_RPM; } else ga->lpFanSpeedValue.iSpeedType = ADL_DL_FANCTRL_SPEED_TYPE_PERCENT; ga->lpFanSpeedValue.iFanSpeed = iFanSpeed; ret = ADL_Overdrive5_FanSpeed_Set(ga->iAdapterIndex, 0, &ga->lpFanSpeedValue); ga->managed = true; unlock_adl(); return ret; } #ifdef HAVE_CURSES static int set_powertune(int gpu, int iPercentage) { struct gpu_adl *ga; int dummy, ret = 1; if (!gpus[gpu].has_adl || !adl_active) { wlogprint("Set powertune not supported\n"); return ret; } ga = &gpus[gpu].adl; lock_adl(); ADL_Overdrive5_PowerControl_Set(ga->iAdapterIndex, iPercentage); ADL_Overdrive5_PowerControl_Get(ga->iAdapterIndex, &ga->iPercentage, &dummy); if (ga->iPercentage == iPercentage) ret = 0; ga->managed = true; unlock_adl(); return ret; } #endif /* Returns whether the fanspeed is optimal already or not. The fan_window bool * tells us whether the current fanspeed is in the target range for fanspeeds. 
*/ static bool fan_autotune(int gpu, int temp, int fanpercent, int lasttemp, bool *fan_window) { struct cgpu_info *cgpu = &gpus[gpu]; int tdiff = round(temp - lasttemp); struct gpu_adl *ga = &cgpu->adl; int top = gpus[gpu].gpu_fan; int bot = gpus[gpu].min_fan; int newpercent = fanpercent; int iMin = 0, iMax = 100; get_fanrange(gpu, &iMin, &iMax); if (temp > ga->overtemp && fanpercent < iMax) { applog(LOG_WARNING, "Overheat detected on GPU %d, increasing fan to 100%%", gpu); newpercent = iMax; dev_error(cgpu, REASON_DEV_OVER_HEAT); } else if (temp > gpus[gpu].targettemp && fanpercent < top && tdiff >= 0) { applog(LOG_DEBUG, "Temperature over target, increasing fanspeed"); if (temp > gpus[gpu].targettemp + opt_hysteresis) newpercent = ga->targetfan + 10; else newpercent = ga->targetfan + 5; if (newpercent > top) newpercent = top; } else if (fanpercent > bot && temp < gpus[gpu].targettemp - opt_hysteresis) { /* Detect large swings of 5 degrees or more and change fan by * a proportion more */ if (tdiff <= 0) { applog(LOG_DEBUG, "Temperature %d degrees below target, decreasing fanspeed", opt_hysteresis); newpercent = ga->targetfan - 1 + tdiff / 5; } else if (tdiff >= 5) { applog(LOG_DEBUG, "Temperature climbed %d while below target, increasing fanspeed", tdiff); newpercent = ga->targetfan + tdiff / 5; } } else { /* We're in the optimal range, make minor adjustments if the * temp is still drifting */ if (fanpercent > bot && tdiff < 0 && lasttemp < gpus[gpu].targettemp) { applog(LOG_DEBUG, "Temperature dropping while in target range, decreasing fanspeed"); newpercent = ga->targetfan + tdiff; } else if (fanpercent < top && tdiff > 0 && temp > gpus[gpu].targettemp - opt_hysteresis) { applog(LOG_DEBUG, "Temperature rising while in target range, increasing fanspeed"); newpercent = ga->targetfan + tdiff; } } if (newpercent > iMax) newpercent = iMax; else if (newpercent < iMin) newpercent = iMin; if (newpercent <= top) *fan_window = true; else *fan_window = false; if (newpercent != fanpercent) { applog(LOG_INFO, "Setting GPU %d fan percentage to %d", gpu, newpercent); set_fanspeed(gpu, newpercent); /* If the fanspeed is going down and we're below the top speed, * consider the fan optimal to prevent minute changes in * fanspeed delaying GPU engine speed changes */ if (newpercent < fanpercent && *fan_window) return true; return false; } return true; } void gpu_autotune(int gpu, enum dev_enable *denable) { int temp, fanpercent, engine, newengine, twintemp = 0; bool fan_optimal = true, fan_window = true; struct cgpu_info *cgpu; struct gpu_adl *ga; cgpu = &gpus[gpu]; ga = &cgpu->adl; lock_adl(); ADL_Overdrive5_CurrentActivity_Get(ga->iAdapterIndex, &ga->lpActivity); gpus[gpu].temp = temp = __gpu_temp(ga); if (ga->twin) twintemp = __gpu_temp(ga->twin); fanpercent = __gpu_fanpercent(ga); unlock_adl(); newengine = engine = gpu_engineclock(gpu) * 100; if (temp && fanpercent >= 0 && ga->autofan) { if (!ga->twin) fan_optimal = fan_autotune(gpu, temp, fanpercent, ga->lasttemp, &fan_window); else if (ga->autofan && (ga->has_fanspeed || !ga->twin->autofan)) { /* On linked GPUs, we autotune the fan only once, based * on the highest temperature from either GPUs */ int hightemp, fan_gpu; int lasttemp; if (twintemp > temp) { lasttemp = ga->twin->lasttemp; hightemp = twintemp; } else { lasttemp = ga->lasttemp; hightemp = temp; } if (ga->has_fanspeed) fan_gpu = gpu; else fan_gpu = ga->twin->gpu; fan_optimal = fan_autotune(fan_gpu, hightemp, fanpercent, lasttemp, &fan_window); } } if (engine && ga->autoengine) { if (temp > 
cgpu->cutofftemp) { // Shutoff and recovery happens back in watchdog_thread newengine = ga->minspeed; } else if (temp > ga->overtemp && engine > ga->minspeed) { applog(LOG_WARNING, "Overheat detected, decreasing GPU %d clock speed", gpu); newengine = ga->minspeed; dev_error(cgpu, REASON_DEV_OVER_HEAT); } else if (temp > gpus[gpu].targettemp + opt_hysteresis && engine > ga->minspeed && fan_optimal) { applog(LOG_DEBUG, "Temperature %d degrees over target, decreasing clock speed", opt_hysteresis); newengine = engine - ga->lpOdParameters.sEngineClock.iStep; /* Only try to tune engine speed up if this GPU is not disabled */ } else if (temp < gpus[gpu].targettemp && engine < ga->maxspeed && fan_window && *denable == DEV_ENABLED) { int iStep = ga->lpOdParameters.sEngineClock.iStep; applog(LOG_DEBUG, "Temperature below target, increasing clock speed"); if (temp < gpus[gpu].targettemp - opt_hysteresis) iStep *= 2; newengine = engine + iStep; } if (newengine > ga->maxspeed) newengine = ga->maxspeed; else if (newengine < ga->minspeed) newengine = ga->minspeed; /* Adjust engine clock speed if it's lower, or if it's higher * but higher than the last intended value as well as the * current speed, to avoid setting the engine clock speed to * a speed relative to a lower profile during idle periods. */ if (newengine < engine || (newengine > engine && newengine > ga->lastengine)) { newengine /= 100; applog(LOG_INFO, "Setting GPU %d engine clock to %d", gpu, newengine); set_engineclock(gpu, newengine); } } ga->lasttemp = temp; } void set_defaultfan(int gpu) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) return; ga = &gpus[gpu].adl; lock_adl(); ADL_Overdrive5_FanSpeed_Set(ga->iAdapterIndex, 0, &ga->DefFanSpeedValue); unlock_adl(); } void set_defaultengine(int gpu) { struct gpu_adl *ga; if (!gpus[gpu].has_adl || !adl_active) return; ga = &gpus[gpu].adl; lock_adl(); ADL_Overdrive5_ODPerformanceLevels_Set(ga->iAdapterIndex, ga->DefPerfLev); unlock_adl(); } #ifdef HAVE_CURSES void change_autosettings(int gpu) { struct gpu_adl *ga = &gpus[gpu].adl; char input; int val; wlogprint("Target temperature: %d\n", gpus[gpu].targettemp); wlogprint("Overheat temperature: %d\n", ga->overtemp); wlogprint("Cutoff temperature: %d\n", gpus[gpu].cutofftemp); wlogprint("Toggle [F]an auto [G]PU auto\nChange [T]arget [O]verheat [C]utoff\n"); wlogprint("Or press any other key to continue\n"); input = getch(); if (!strncasecmp(&input, "f", 1)) { ga->autofan ^= true; wlogprint("Fan autotune is now %s\n", ga->autofan ? "enabled" : "disabled"); if (!ga->autofan) { wlogprint("Resetting fan to startup settings\n"); set_defaultfan(gpu); } } else if (!strncasecmp(&input, "g", 1)) { ga->autoengine ^= true; wlogprint("GPU engine clock autotune is now %s\n", ga->autoengine ?
"enabled" : "disabled"); if (!ga->autoengine) { wlogprint("Resetting GPU engine clock to startup settings\n"); set_defaultengine(gpu); } } else if (!strncasecmp(&input, "t", 1)) { val = curses_int("Enter target temperature for this GPU in C (0-200)"); if (val < 0 || val > 200) wlogprint("Invalid temperature"); else gpus[gpu].targettemp = val; } else if (!strncasecmp(&input, "o", 1)) { wlogprint("Enter overheat temperature for this GPU in C (%d+)", gpus[gpu].targettemp); val = curses_int(""); if (val <= gpus[gpu].targettemp || val > 200) wlogprint("Invalid temperature"); else ga->overtemp = val; } else if (!strncasecmp(&input, "c", 1)) { wlogprint("Enter cutoff temperature for this GPU in C (%d+)", ga->overtemp); val = curses_int(""); if (val <= ga->overtemp || val > 200) wlogprint("Invalid temperature"); else gpus[gpu].cutofftemp = val; } } void change_gpusettings(int gpu) { struct gpu_adl *ga = &gpus[gpu].adl; float fval, fmin = 0, fmax = 0; int val, imin = 0, imax = 0; char input; int engineclock = 0, memclock = 0, activity = 0, fanspeed = 0, fanpercent = 0, powertune = 0; float temp = 0, vddc = 0; updated: if (gpu_stats(gpu, &temp, &engineclock, &memclock, &vddc, &activity, &fanspeed, &fanpercent, &powertune)) wlogprint("Temp: %.1f C\n", temp); if (fanpercent >= 0 || fanspeed >= 0) { wlogprint("Fan Speed: "); if (fanpercent >= 0) wlogprint("%d%% ", fanpercent); if (fanspeed >= 0) wlogprint("(%d RPM)", fanspeed); wlogprint("\n"); } wlogprint("Engine Clock: %d MHz\nMemory Clock: %d MHz\nVddc: %.3f V\nActivity: %d%%\nPowertune: %d%%\n", engineclock, memclock, vddc, activity, powertune); wlogprint("Fan autotune is %s (%d-%d)\n", ga->autofan ? "enabled" : "disabled", gpus[gpu].min_fan, gpus[gpu].gpu_fan); wlogprint("GPU engine clock autotune is %s (%d-%d)\n", ga->autoengine ? 
"enabled" : "disabled", ga->minspeed / 100, ga->maxspeed / 100); wlogprint("Change [A]utomatic [E]ngine [F]an [M]emory [V]oltage [P]owertune\n"); wlogprint("Or press any other key to continue\n"); input = getch(); if (!strncasecmp(&input, "a", 1)) { change_autosettings(gpu); } else if (!strncasecmp(&input, "e", 1)) { get_enginerange(gpu, &imin, &imax); wlogprint("Enter GPU engine clock speed (%d - %d MHz)", imin, imax); val = curses_int(""); if (val < imin || val > imax) { wlogprint("Value is outside safe range, are you sure?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) return; } if (!set_engineclock(gpu, val)) wlogprint("Driver reports success but check values below\n"); else wlogprint("Failed to modify engine clock speed\n"); } else if (!strncasecmp(&input, "f", 1)) { get_fanrange(gpu, &imin, &imax); wlogprint("Enter fan percentage (%d - %d %%)", imin, imax); val = curses_int(""); if (val < imin || val > imax) { wlogprint("Value is outside safe range, are you sure?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) return; } if (!set_fanspeed(gpu, val)) wlogprint("Driver reports success but check values below\n"); else wlogprint("Failed to modify fan speed\n"); } else if (!strncasecmp(&input, "m", 1)) { get_memoryrange(gpu, &imin, &imax); wlogprint("Enter GPU memory clock speed (%d - %d MHz)", imin, imax); val = curses_int(""); if (val < imin || val > imax) { wlogprint("Value is outside safe range, are you sure?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) return; } if (!set_memoryclock(gpu, val)) wlogprint("Driver reports success but check values below\n"); else wlogprint("Failed to modify memory clock speed\n"); } else if (!strncasecmp(&input, "v", 1)) { get_vddcrange(gpu, &fmin, &fmax); wlogprint("Enter GPU voltage (%.3f - %.3f V)", fmin, fmax); fval = curses_float(""); if (fval < fmin || fval > fmax) { wlogprint("Value is outside safe range, are you sure?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) return; } if (!set_vddc(gpu, fval)) wlogprint("Driver reports success but check values below\n"); else wlogprint("Failed to modify voltage\n"); } else if (!strncasecmp(&input, "p", 1)) { val = curses_int("Enter powertune value (-20 - 20)"); if (val < -20 || val > 20) { wlogprint("Value is outside safe range, are you sure?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) return; } if (!set_powertune(gpu, val)) wlogprint("Driver reports success but check values below\n"); else wlogprint("Failed to modify powertune value\n"); } else { clear_logwin(); return; } cgsleep_ms(1000); goto updated; } #endif static void free_adl(void) { ADL_Main_Memory_Free ((void **)&lpInfo); ADL_Main_Control_Destroy (); #ifndef WIN32 dlclose(hDLL); #else FreeLibrary(hDLL); #endif } void clear_adl(int nDevs) { struct gpu_adl *ga; int i; if (!adl_active) return; lock_adl(); /* Try to reset values to their defaults */ for (i = 0; i < nDevs; i++) { ga = &gpus[i].adl; /* Only reset the values if we've changed them at any time */ if (!gpus[i].has_adl || !ga->managed) continue; ADL_Overdrive5_ODPerformanceLevels_Set(ga->iAdapterIndex, ga->DefPerfLev); free(ga->DefPerfLev); ADL_Overdrive5_FanSpeed_Set(ga->iAdapterIndex, 0, &ga->DefFanSpeedValue); ADL_Overdrive5_FanSpeedToDefault_Set(ga->iAdapterIndex, 0); } adl_active = false; unlock_adl(); free_adl(); } #endif /* HAVE_ADL */ bfgminer-bfgminer-3.10.0/adl.h000066400000000000000000000015711226556647300161100ustar00rootroot00000000000000#ifndef __ADL_H__ #define __ADL_H__ #ifdef HAVE_ADL #include bool adl_active; bool opt_reorder; 
const int opt_targettemp; const int opt_overheattemp; void init_adl(int nDevs); float gpu_temp(int gpu); int gpu_engineclock(int gpu); int gpu_memclock(int gpu); float gpu_vddc(int gpu); int gpu_activity(int gpu); int gpu_fanspeed(int gpu); int gpu_fanpercent(int gpu); bool gpu_stats(int gpu, float *temp, int *engineclock, int *memclock, float *vddc, int *activity, int *fanspeed, int *fanpercent, int *powertune); void change_gpusettings(int gpu); void gpu_autotune(int gpu, enum dev_enable *denable); void clear_adl(int nDevs); #else /* HAVE_ADL */ #define adl_active (0) static inline void init_adl(__maybe_unused int nDevs) {} static inline void change_gpusettings(__maybe_unused int gpu) { } static inline void clear_adl(__maybe_unused int nDevs) {} #endif #endif bfgminer-bfgminer-3.10.0/adl_functions.h000066400000000000000000000606041226556647300202020ustar00rootroot00000000000000/******************************************************************************* * This program reads HW information from your ATI Radeon card and displays them * You can also change frequencies and voltages. * THIS PROGRAM MAY DAMAGE YOUR VIDEO CARD, IF YOU APPLY NONSENSIAL VALUES. * e.g. INCREASING THE VOLTAGES AND FREQUENCIES IN CONJUNCTION WITH LOWERING THE * FAN SPEED IS NOT ADVISABLE! * Copyright 2010-2011 Thorsten Gilling * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *******************************************************************************/ // ------------------------------------------------------------------------------------------------------------ // AMD ADL function types from Version 3.0 // ------------------------------------------------------------------------------------------------------------ #ifndef WIN32 #include //dyopen, dlsym, dlclose #include #include //memeset #else #include #include #endif #include "ADL/adl_sdk.h" // Definitions of the used function pointers. 
Add more if you use other ADL APIs // ------------------------------------------------------------------------------------------------------------ // ADL Main typedef int ( *ADL_MAIN_CONTROL_CREATE ) (ADL_MAIN_MALLOC_CALLBACK callback, int iEnumConnectedAdapters); typedef int ( *ADL_MAIN_CONTROL_REFRESH ) (); typedef int ( *ADL_MAIN_CONTROL_DESTROY ) (); typedef int ( *ADL_GRAPHICS_PLATFORM_GET ) (int *lpPlatForm); // ------------------------------------------------------------------------------------------------------------ // ADL Adapter/General typedef int ( *ADL_ADAPTER_ACTIVE_GET ) (int iAdapterIndex, int *lpStatus); typedef int ( *ADL_ADAPTER_NUMBEROFADAPTERS_GET ) (int *lpNumAdapters); typedef int ( *ADL_ADAPTER_ADAPTERINFO_GET ) (LPAdapterInfo lpInfo, int iInputSize); typedef int ( *ADL_ADAPTER_ASICFAMILYTYPE_GET ) (int iAdapterIndex, int *lpAsicTypes, int *lpValids); typedef int ( *ADL_ADAPTER_SPEED_CAPS ) (int iAdapterIndex, int *lpCaps, int *lpValid); typedef int ( *ADL_ADAPTER_SPEED_GET ) (int iAdapterIndex, int *lpCurrent, int *lpDefault); typedef int ( *ADL_ADAPTER_SPEED_SET ) (int iAdapterIndex, int iSpeed); typedef int ( *ADL_ADAPTER_ACCESSIBILITY_GET ) (int iAdapterIndex, int *lpAccessibility); typedef int ( *ADL_ADAPTER_VIDEOBIOSINFO_GET ) (int iAdapterIndex, ADLBiosInfo *lpBiosInfo); typedef int ( *ADL_ADAPTER_ID_GET ) (int iAdapterIndex, int *lpAdapterID); // ADL Adapter/CrossDisplay typedef int ( *ADL_ADAPTER_CROSSDISPLAYADAPTERROLE_CAPS ) (int iAdapterIndex, int *lpCrossDisplaySupport, int *lpAdapterRole, int *lpNumPossDisplayAdapters, int **lppPossDisplayAdapters, int *lpNnumPosRenderingAdapters, int **lppPosRenderingAdapters, int *lpErrorStatus); typedef int ( *ADL_ADAPTER_CROSSDISPLAYINFO_GET ) (int iAdapterIndex, int *lpAdapterRole, int *lpCrossdisplayMode, int *lpNumDisplayAdapters, int **lppDisplayAdapters, int *lpNumRenderingAdapters, int **lppRenderingAdapters, int *lpErrorCodeStatus); typedef int ( *ADL_ADAPTER_CROSSDISPLAYINFO_SET ) (int iAdapterIndex, int iDisplayAdapterIndex, int iRenderingAdapterIndex, int crossdisplayMode, int *lpErrorCode); // ADL Adapter/CrossFire typedef int ( *ADL_ADAPTER_CROSSFIRE_CAPS ) (int iAdapterIndex, int *lpPreferred, int *lpNumComb, ADLCrossfireComb **ppCrossfireComb); typedef int ( *ADL_ADAPTER_CROSSFIRE_GET ) (int iAdapterIndex, ADLCrossfireComb *lpCrossfireComb, ADLCrossfireInfo *lpCrossfireInfo); typedef int ( *ADL_ADAPTER_CROSSFIRE_SET ) (int iAdapterIndex, ADLCrossfireComb *lpCrossfireComb); // ------------------------------------------------------------------------------------------------------------ // ADL Display/Misc typedef int ( *ADL_DISPLAY_DISPLAYINFO_GET ) (int iAdapterIndex, int *lpNumDisplays, ADLDisplayInfo **lppInfo, int iForceDetect); typedef int ( *ADL_DISPLAY_NUMBEROFDISPLAYS_GET ) (int iAdapterIndex, int *lpNumDisplays); typedef int ( *ADL_DISPLAY_PRESERVEDASPECTRATIO_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport, int *lpCurrent, int *lpDefault); typedef int ( *ADL_DISPLAY_PRESERVEDASPECTRATIO_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_DISPLAY_IMAGEEXPANSION_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport, int *lpCurrent, int *lpDefault); typedef int ( *ADL_DISPLAY_IMAGEEXPANSION_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_DISPLAY_POSITION_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpX, int *lpY, int *lpXDefault, int *lpYDefault, int *lpMinX, int *lpMinY, int *lpMaxX, int *lpMaxY, int *lpStepX, int 
*lpStepY); typedef int ( *ADL_DISPLAY_POSITION_SET ) (int iAdapterIndex, int iDisplayIndex, int iX, int iY); typedef int ( *ADL_DISPLAY_SIZE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpWidth, int *lpHeight, int *lpDefaultWidth, int *lpDefaultHeight, int *lpMinWidth, int *lpMinHeight, int *lpMaxWidth, int *lpMaxHeight, int *lpStepWidth, int *lpStepHeight); typedef int ( *ADL_DISPLAY_SIZE_SET ) (int iAdapterIndex, int iDisplayIndex, int iWidth, int iHeight); typedef int ( *ADL_DISPLAY_ADJUSTCAPS_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpInfo); typedef int ( *ADL_DISPLAY_CAPABILITIES_GET ) (int iAdapterIndex, int *lpNumberOfControlers, int *lpNumberOfDisplays); typedef int ( *ADL_DISPLAY_CONNECTEDDISPLAYS_GET ) (int iAdapterIndex, int *lpConnections); typedef int ( *ADL_DISPLAY_DEVICECONFIG_GET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayConfig *lpDisplayConfig); typedef int ( *ADL_DISPLAY_PROPERTY_GET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayProperty *lpDisplayProperty); typedef int ( *ADL_DISPLAY_PROPERTY_SET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayProperty *lpDisplayProperty); typedef int ( *ADL_DISPLAY_SWITCHINGCAPABILITY_GET ) (int iAdapterIndex, int *lpResult); typedef int ( *ADL_DISPLAY_DITHERSTATE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpDitherState); typedef int ( *ADL_DISPLAY_DITHERSTATE_SET ) (int iAdapterIndex, int iDisplayIndex, int iDitherState); typedef int ( *ADL_DISPLAY_SUPPORTEDPIXELFORMAT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpPixelFormat); typedef int ( *ADL_DISPLAY_PIXELFORMAT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpPixelFormat); typedef int ( *ADL_DISPLAY_PIXELFORMAT_SET ) (int iAdapterIndex, int iDisplayIndex, int iPixelFormat); typedef int ( *ADL_DISPLAY_ODCLOCKINFO_GET ) (int iAdapterIndex, ADLAdapterODClockInfo *lpOdClockInfo); typedef int ( *ADL_DISPLAY_ODCLOCKCONFIG_SET ) (int iAdapterIndex, ADLAdapterODClockConfig *lpOdClockConfig); typedef int ( *ADL_DISPLAY_ADJUSTMENTCOHERENT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpAdjustmentCoherentCurrent, int *lpAdjustmentCoherentDefault); typedef int ( *ADL_DISPLAY_ADJUSTMENTCOHERENT_SET ) (int iAdapterIndex, int iDisplayIndex, int iAdjustmentCoherent); typedef int ( *ADL_DISPLAY_REDUCEDBLANKING_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpReducedBlankingCurrent, int *lpReducedBlankingDefault); typedef int ( *ADL_DISPLAY_REDUCEDBLANKING_SET ) (int iAdapterIndex, int iDisplayIndex, int iReducedBlanking); typedef int ( *ADL_DISPLAY_FORMATSOVERRIDE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSettingsSupported, int *lpSettingsSupportedEx, int *lpCurSettings); typedef int ( *ADL_DISPLAY_FORMATSOVERRIDE_SET ) (int iAdapterIndex, int iDisplayIndex, int iOverrideSettings); typedef int ( *ADL_DISPLAY_MVPUCAPS_GET ) (int iAdapterIndex, ADLMVPUCaps *lpMvpuCaps); typedef int ( *ADL_DISPLAY_MVPUSTATUS_GET ) (int iAdapterIndex, ADLMVPUStatus *lpMvpuStatus); // ADL Display/Eyefinity typedef int ( *ADL_ADAPTER_ACTIVE_SET ) (int iAdapterIndex, int iStatus, int *lpNewlyActivate); typedef int ( *ADL_ADAPTER_ACTIVE_SETPREFER ) (int iAdapterIndex, int iStatus, int iNumPreferTarget, ADLDisplayTarget *lpPreferTarget, int *lpNewlyActivate); typedef int ( *ADL_ADAPTER_PRIMARY_GET ) (int *lpPrimaryAdapterIndex); typedef int ( *ADL_ADAPTER_PRIMARY_SET ) (int iAdapterIndex); typedef int ( *ADL_ADAPTER_MODESWITCH ) (int iAdapterIndex); typedef int ( *ADL_DISPLAY_MODES_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpNumModes, ADLMode 
**lppModes); typedef int ( *ADL_DISPLAY_MODES_SET ) (int iAdapterIndex, int iDisplayIndex, int iNumModes, ADLMode *lpModes); typedef int ( *ADL_DISPLAY_POSSIBLEMODE_GET ) (int iAdapterIndex, int *lpNumModes, ADLMode **lppModes); typedef int ( *ADL_DISPLAY_FORCIBLEDISPLAY_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpStatus); typedef int ( *ADL_DISPLAY_FORCIBLEDISPLAY_SET ) (int iAdapterIndex, int iDisplayIndex, int iStatus); typedef int ( *ADL_ADAPTER_NUMBEROFACTIVATABLESOURCES_GET ) (int iAdapterIndex, int *lpNumSources, ADLActivatableSource **lppSources); typedef int ( *ADL_ADAPTER_DISPLAY_CAPS ) (int iAdapterIndex, int *lpNumDisplayCaps, ADLAdapterDisplayCap **lppAdapterDisplayCaps); typedef int ( *ADL_DISPLAY_DISPLAYMAPCONFIG_GET ) (int iAdapterIndex, int *lpNumDisplayMap, ADLDisplayMap **lppDisplayMap, int *lpNumDisplayTarget, ADLDisplayTarget **lppDisplayTarget, int iOptions); typedef int ( *ADL_DISPLAY_DISPLAYMAPCONFIG_SET ) (int iAdapterIndex, int iNumDisplayMap, ADLDisplayMap *lpDisplayMap, int iNumDisplayTarget, ADLDisplayTarget *lpDisplayTarget); typedef int ( *ADL_DISPLAY_POSSIBLEMAPPING_GET ) (int iAdapterIndex, int iNumberOfPresetMapping, ADLPossibleMapping *lpPresetMappings, int iEnquiryControllerIndex, int *lpNumberOfEnquiryPossibleMappings, ADLPossibleMapping **lppEnquiryPossibleMappings); typedef int ( *ADL_DISPLAY_DISPLAYMAPCONFIG_VALIDATE ) (int iAdapterIndex, int iNumPossibleMap, ADLPossibleMap *lpPossibleMaps, int *lpNumPossibleMapResult, ADLPossibleMapResult **lppPossibleMapResult); typedef int ( *ADL_DISPLAY_DISPLAYMAPCONFIG_POSSIBLEADDANDREMOVE ) (int iAdapterIndex, int iNumDisplayMap, ADLDisplayMap *lpDisplayMap, int iNumDisplayTarget, ADLDisplayTarget *lpDisplayTarget, int *lpNumPossibleAddTarget, ADLDisplayTarget **lppPossibleAddTarget, int *lpNumPossibleRemoveTarget, ADLDisplayTarget **lppPossibleRemoveTarget); typedef int ( *ADL_DISPLAY_SLSGRID_CAPS ) (int iAdapterIndex, int *lpNumSLSGrid, ADLSLSGrid **lppSLSGrid, int iOption); typedef int ( *ADL_DISPLAY_SLSMAPINDEXLIST_GET ) (int iAdapterIndex, int *lpNumSLSMapIndexList, int **lppSLSMapIndexList, int iOptions); typedef int ( *ADL_DISPLAY_SLSMAPINDEX_GET ) (int iAdapterIndex, int iADLNumDisplayTarget, ADLDisplayTarget *lpDisplayTarget, int *lpSLSMapIndex); typedef int ( *ADL_DISPLAY_SLSMAPCONFIG_GET ) (int iAdapterIndex, int iSLSMapIndex, ADLSLSMap *lpSLSMap, int *lpNumSLSTarget, ADLSLSTarget **lppSLSTarget, int *lpNumNativeMode, ADLSLSMode **lppNativeMode, int *lpNumBezelMode, ADLBezelTransientMode **lppBezelMode, int *lpNumTransientMode, ADLBezelTransientMode **lppTransientMode, int *lpNumSLSOffset, ADLSLSOffset **lppSLSOffset, int iOption); typedef int ( *ADL_DISPLAY_SLSMAPCONFIG_CREATE ) (int iAdapterIndex, ADLSLSMap SLSMap, int iNumTargetTarget, ADLSLSTarget *lpSLSTarget, int iBezelModePercent, int *lpSLSMapIndex, int iOption); typedef int ( *ADL_DISPLAY_SLSMAPCONFIG_DELETE ) (int iAdapterIndex, int iSLSMapIndex); typedef int ( *ADL_DISPLAY_SLSMAPCONFIG_SETSTATE ) (int iAdapterIndex, int iSLSMapIndex, int iState); typedef int ( *ADL_DISPLAY_SLSMAPCONFIG_REARRANGE ) (int iAdapterIndex, int iSLSMapIndex, int iNumDisplayTarget, ADLSLSTarget *lpSLSTarget, ADLSLSMap slsMap, int iOption); typedef int ( *ADL_DISPLAY_POSSIBLEMODE_WINXP_GET ) (int iAdapterIndex, int iNumDisplayTargets, ADLDisplayTarget *lpDisplayTargets, int iLargeDesktopSupportedType, int iDevicePanningControl, int *lpNumModes, ADLMode **lppModes); typedef int ( *ADL_DISPLAY_BEZELOFFSETSTEPPINGSIZE_GET ) (int iAdapterIndex, int 
*lpNumBezelOffsetSteppingSize, ADLBezelOffsetSteppingSize **lppBezelOffsetSteppingSize); typedef int ( *ADL_DISPLAY_BEZELOFFSET_SET ) (int iAdapterIndex, int iSLSMapIndex, int iNumBezelOffset, LPADLSLSOffset lpBezelOffset, ADLSLSMap SLSMap, int iOption); typedef int ( *ADL_DISPLAY_BEZELSUPPORTED_VALIDATE ) (int iAdapterIndex, int iNumPossibleSLSMap, LPADLPossibleSLSMap lpPossibleSLSMaps, int *lpNumPossibleSLSMapResult, LPADLPossibleMapResult *lppPossibleMapResult); // ADL Display/Color typedef int ( *ADL_DISPLAY_COLORCAPS_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCaps, int *lpValids); typedef int ( *ADL_DISPLAY_COLOR_SET ) (int iAdapterIndex, int iDisplayIndex, int iColorType, int iCurrent); typedef int ( *ADL_DISPLAY_COLOR_GET ) (int iAdapterIndex, int iDisplayIndex, int iColorType, int *lpCurrent, int *lpDefault, int *lpMin, int *lpMax, int *lpStep); typedef int ( *ADL_DISPLAY_COLORTEMPERATURESOURCE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpTempSource); typedef int ( *ADL_DISPLAY_COLORTEMPERATURESOURCE_SET ) (int iAdapterIndex, int iDisplayIndex, int iTempSource); // ADL Display/Timing typedef int ( *ADL_DISPLAY_MODETIMINGOVERRIDE_GET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayMode *lpModeIn, ADLDisplayModeInfo *lpModeInfoOut); typedef int ( *ADL_DISPLAY_MODETIMINGOVERRIDE_SET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayModeInfo *lpMode, int iForceUpdate); typedef int ( *ADL_DISPLAY_MODETIMINGOVERRIDELIST_GET ) (int iAdapterIndex, int iDisplayIndex, int iMaxNumOfOverrides, ADLDisplayModeInfo *lpModeInfoList, int *lpNumOfOverrides); // ADL Display/Customize typedef int ( *ADL_DISPLAY_CUSTOMIZEDMODELISTNUM_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpListNum); typedef int ( *ADL_DISPLAY_CUSTOMIZEDMODELIST_GET ) (int iAdapterIndex, int iDisplayIndex, ADLCustomMode *lpCustomModeList, int iBuffSize); typedef int ( *ADL_DISPLAY_CUSTOMIZEDMODE_ADD ) (int iAdapterIndex, int iDisplayIndex, ADLCustomMode customMode); typedef int ( *ADL_DISPLAY_CUSTOMIZEDMODE_DELETE ) (int iAdapterIndex, int iDisplayIndex, int iIndex); typedef int ( *ADL_DISPLAY_CUSTOMIZEDMODE_VALIDATE ) (int iAdapterIndex, int iDisplayIndex, ADLCustomMode customMode, int *lpValid); // ADL Display/Over-Underscan typedef int ( *ADL_DISPLAY_UNDERSCAN_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_DISPLAY_UNDERSCAN_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCurrent, int *lpDefault, int *lpMin, int *lpMax, int *lpStep); typedef int ( *ADL_DISPLAY_OVERSCAN_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_DISPLAY_OVERSCAN_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCurrent, int *lpDefualt, int *lpMin, int *lpMax, int *lpStep); // ADL Display/Overlay typedef int ( *ADL_DISPLAY_CONTROLLEROVERLAYADJUSTMENTCAPS_GET ) (int iAdapterIndex, ADLControllerOverlayInput *lpOverlayInput, ADLControllerOverlayInfo *lpCapsInfo); typedef int ( *ADL_DISPLAY_CONTROLLEROVERLAYADJUSTMENTDATA_GET ) (int iAdapterIndex, ADLControllerOverlayInput *lpOverlay); typedef int ( *ADL_DISPLAY_CONTROLLEROVERLAYADJUSTMENTDATA_SET ) (int iAdapterIndex, ADLControllerOverlayInput *lpOverlay); // ADL Display/PowerXpress typedef int ( *ADL_DISPLAY_POWERXPRESSVERSION_GET ) (int iAdapterIndex, int *lpVersion); typedef int ( *ADL_DISPLAY_POWERXPRESSACTIVEGPU_GET ) (int iAdapterIndex, int *lpActiveGPU); typedef int ( *ADL_DISPLAY_POWERXPRESSACTIVEGPU_SET ) (int iAdapterIndex, int iActiveGPU, int *lpOperationResult); typedef int ( 
*ADL_DISPLAY_POWERXPRESS_AUTOSWITCHCONFIG_GET ) (int iAdapterIndex, int *lpAutoSwitchOnACDCEvent, int *lpAutoSwitchOnDCACEvent); typedef int ( *ADL_DISPLAY_POWERXPRESS_AUTOSWITCHCONFIG_SET ) (int iAdapterIndex, int iAutoSwitchOnACDCEvent, int iAutoSwitchOnDCACEvent); // ------------------------------------------------------------------------------------------------------------ // ADL DFP typedef int ( *ADL_DFP_BASEAUDIOSUPPORT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport); typedef int ( *ADL_DFP_HDMISUPPORT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport); typedef int ( *ADL_DFP_MVPUANALOGSUPPORT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport); typedef int ( *ADL_DFP_PIXELFORMAT_CAPS ) (int iAdapterIndex, int iDisplayIndex, int *lpValidBits, int *lpValidCaps); typedef int ( *ADL_DFP_PIXELFORMAT_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCurState, int *lpDefault); typedef int ( *ADL_DFP_PIXELFORMAT_SET ) (int iAdapterIndex, int iDisplayIndex, int iState); typedef int ( *ADL_DFP_GPUSCALINGENABLE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport, int *lpCurrent, int *lpDefault); typedef int ( *ADL_DFP_GPUSCALINGENABLE_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_DFP_ALLOWONLYCETIMINGS_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpSupport, int *lpCurrent, int *lpDefault); typedef int ( *ADL_DFP_ALLOWONLYCETIMINGS_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); // ADl TV typedef int ( *ADL_DISPLAY_TVCAPS_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpcaps); typedef int ( *ADL_TV_STANDARD_SET ) (int iAdapterIndex, int iDisplayIndex, int iCurrent); typedef int ( *ADL_TV_STANDARD_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCurrent, int *lpDefault, int *lpSupportedStandards); // ADL Component Video typedef int ( *ADL_CV_DONGLESETTINGS_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpDongleSetting, int *lpOverrideSettingsSupported, int *lpCurOverrideSettings); typedef int ( *ADL_CV_DONGLESETTINGS_SET ) (int iAdapterIndex, int iDisplayIndex, int iOverrideSettings); typedef int ( *ADL_CV_DONGLESETTINGS_RESET ) (int iAdapterIndex, int iDisplayIndex); // ------------------------------------------------------------------------------------------------------------ // ADL Overdrive 5 typedef int ( *ADL_OVERDRIVE5_CURRENTACTIVITY_GET ) (int iAdapterIndex, ADLPMActivity *lpActivity); typedef int ( *ADL_OVERDRIVE5_THERMALDEVICES_ENUM ) (int iAdapterIndex, int iThermalControllerIndex, ADLThermalControllerInfo *lpThermalControllerInfo); typedef int ( *ADL_OVERDRIVE5_TEMPERATURE_GET ) (int iAdapterIndex, int iThermalControllerIndex, ADLTemperature *lpTemperature); typedef int ( *ADL_OVERDRIVE5_FANSPEEDINFO_GET ) (int iAdapterIndex, int iThermalControllerIndex, ADLFanSpeedInfo *lpFanSpeedInfo); typedef int ( *ADL_OVERDRIVE5_FANSPEED_GET ) (int iAdapterIndex, int iThermalControllerIndex, ADLFanSpeedValue *lpFanSpeedValue); typedef int ( *ADL_OVERDRIVE5_FANSPEED_SET ) (int iAdapterIndex, int iThermalControllerIndex, ADLFanSpeedValue *lpFanSpeedValue); typedef int ( *ADL_OVERDRIVE5_FANSPEEDTODEFAULT_SET ) (int iAdapterIndex, int iThermalControllerIndex); typedef int ( *ADL_OVERDRIVE5_ODPARAMETERS_GET ) (int iAdapterIndex, ADLODParameters *lpOdParameters); typedef int ( *ADL_OVERDRIVE5_ODPERFORMANCELEVELS_GET ) (int iAdapterIndex, int iDefault, ADLODPerformanceLevels *lpOdPerformanceLevels); typedef int ( *ADL_OVERDRIVE5_ODPERFORMANCELEVELS_SET ) (int iAdapterIndex, ADLODPerformanceLevels 
*lpOdPerformanceLevels); // ------------------------------------------------------------------------------------------------------------ // ADL I2C typedef int ( *ADL_DISPLAY_WRITEANDREADI2CREV_GET ) (int iAdapterIndex, int *lpMajor, int *lpMinor); typedef int ( *ADL_DISPLAY_WRITEANDREADI2C ) (int iAdapterIndex, ADLI2C *plI2C); typedef int ( *ADL_DISPLAY_DDCBLOCKACCESS_GET ) (int iAdapterIndex, int iDisplayIndex, int iOption, int iCommandIndex, int iSendMsgLen, char *lpucSendMsgBuf, int *lpulRecvMsgLen, char *lpucRecvMsgBuf); typedef int ( *ADL_DISPLAY_DDCINFO_GET ) (int iAdapterIndex, int iDisplayIndex, ADLDDCInfo *lpInfo); typedef int ( *ADL_DISPLAY_EDIDDATA_GET ) (int iAdapterIndex, int iDisplayIndex, ADLDisplayEDIDData *lpEDIDData); // ------------------------------------------------------------------------------------------------------------ // ADL Workstation typedef int ( *ADL_WORKSTATION_CAPS ) (int iAdapterIndex, int *lpValidBits, int *lpCaps); typedef int ( *ADL_WORKSTATION_STEREO_GET ) (int iAdapterIndex, int *lpDefState, int *lpCurState); typedef int ( *ADL_WORKSTATION_STEREO_SET ) (int iAdapterIndex, int iCurState); typedef int ( *ADL_WORKSTATION_ADAPTERNUMOFGLSYNCCONNECTORS_GET ) (int iAdapterIndex, int *lpNumOfGLSyncConnectors); typedef int ( *ADL_WORKSTATION_DISPLAYGENLOCKCAPABLE_GET ) (int iAdapterIndex, int iDisplayIndex, int *lpCanGenlock); typedef int ( *ADL_WORKSTATION_GLSYNCMODULEDETECT_GET ) (int iAdapterIndex, int iGlSyncConnector, ADLGLSyncModuleID *lpGlSyncModuleID); typedef int ( *ADL_WORKSTATION_GLSYNCMODULEINFO_GET ) (int iAdapterIndex, int iGlSyncConnector, int *lpNumGLSyncGPUPorts, int *lpNumGlSyncPorts, int *lpMaxSyncDelay, int *lpMaxSampleRate, ADLGLSyncPortCaps **ppGlSyncPorts); typedef int ( *ADL_WORKSTATION_GLSYNCGENLOCKCONFIGURATION_GET ) (int iAdapterIndex, int iGlSyncConnector, int iGlValidMask, ADLGLSyncGenlockConfig *lpGlSyncGenlockConfig); typedef int ( *ADL_WORKSTATION_GLSYNCGENLOCKCONFIGURATION_SET ) (int iAdapterIndex, int iGlSyncConnector, ADLGLSyncGenlockConfig glSyncGenlockConfig); typedef int ( *ADL_WORKSTATION_GLSYNCPORTSTATE_GET ) (int iAdapterIndex, int iGlSyncConnector, int iGlSyncPortType, int iNumLEDs, ADLGlSyncPortInfo *lpGlSyncPortInfo, int **ppGlSyncLEDs); typedef int ( *ADL_WORKSTATION_GLSYNCPORTSTATE_SET ) (int iAdapterIndex, int iGlSyncConnector, ADLGlSyncPortControl glSyncPortControl); typedef int ( *ADL_WORKSTATION_DISPLAYGLSYNCMODE_GET ) (int iAdapterIndex, int iDisplayIndex, ADLGlSyncMode *lpGlSyncMode); typedef int ( *ADL_WORKSTATION_DISPLAYGLSYNCMODE_SET ) (int iAdapterIndex, int iDisplayIndex, ADLGlSyncMode glSyncMode); typedef int ( *ADL_WORKSTATION_GLSYNCSUPPORTEDTOPOLOGY_GET ) (int iAdapterIndex, int iNumSyncModes, ADLGlSyncMode2 *glSyncModes, int *iNumSugSyncModes, ADLGlSyncMode2 **glSugSyncModes); typedef int ( *ADL_WORKSTATION_LOADBALANCING_GET ) (int *lpResultMask, int *lpCurResultValue, int *lpDefResultValue); typedef int ( *ADL_WORKSTATION_LOADBALANCING_SET ) (int iCurState); typedef int ( *ADL_WORKSTATION_LOADBALANCING_CAPS ) (int iAdapterIndex, int *lpResultMask, int *lpResultValue); // ------------------------------------------------------------------------------------------------------------ #ifdef LINUX // ADL Linux typedef int ( *ADL_ADAPTER_MEMORYINFO_GET ) (int iAdapterIndex, ADLMemoryInfo *lpMemoryInfo); typedef int ( *ADL_CONTROLLER_COLOR_SET ) (int iAdapterIndex, int iControllerIndex, ADLGamma adlGamma); typedef int ( *ADL_CONTROLLER_COLOR_GET ) (int iAdapterIndex, int iControllerIndex, ADLGamma 
*lpGammaCurrent, ADLGamma *lpGammaDefault, ADLGamma *lpGammaMin, ADLGamma *lpGammaMax); typedef int ( *ADL_DESKTOPCONFIG_GET ) (int iAdapterIndex, int *lpDesktopConfig); typedef int ( *ADL_DESKTOPCONFIG_SET ) (int iAdapterIndex, int iDesktopConfig); typedef int ( *ADL_NUMBEROFDISPLAYENABLE_GET ) (int iAdapterIndex, int *lpNumberOfDisplays); typedef int ( *ADL_DISPLAYENABLE_SET ) (int iAdapterIndex, int *lpDisplayIndexList, int iDisplayListSize, int bPersistOnly); typedef int ( *ADL_DISPLAY_IDENTIFYDISPLAY ) (int iAdapterIndex, int iDisplayIndex, int iDisplayControllerIndex, int iShow, int iDisplayNum, int iPosX, int iPosY); typedef int ( *ADL_DISPLAY_LUTCOLOR_SET ) (int iAdapterIndex, int iDisplayIndex, ADLGamma adlGamma); typedef int ( *ADL_DISPLAY_LUTCOLOR_GET ) (int iAdapterIndex, int iDisplayIndex, ADLGamma *lpGammaCurrent, ADLGamma *lpGammaDefault, ADLGamma *lpGammaMin, ADLGamma *lpGammaMax); typedef int ( *ADL_ADAPTER_XSCREENINFO_GET ) (LPXScreenInfo lpXScreenInfo, int iInputSize); typedef int ( *ADL_DISPLAY_XRANDRDISPLAYNAME_GET ) (int iAdapterIndex, int iDisplayIndex, char *lpXrandrDisplayName, int iBuffSize); #endif // ------------------------------------------------------------------------------------------------------------ // experimental undocumented typedef int ( *ADL_OVERDRIVE5_POWERCONTROL_GET ) (int iAdapterIndex, int* iPercentage, int* whatever); typedef int ( *ADL_OVERDRIVE5_POWERCONTROL_SET ) (int iAdapterIndex, int iPercentage); //typedef int ( *ADL_OVERDRIVE5_POWERCONTROL_CAPS ) (int iAdapterIndex, int* lpCaps, int* lpValid); //typedef int ( *ADL_OVERDRIVE5_POWERCONTROLINFO_GET) (int iAdapterIndex, ...)bfgminer-bfgminer-3.10.0/api-example.c000066400000000000000000000163551226556647300175530ustar00rootroot00000000000000/* * Copyright 2011 Kano * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include "compat.h" #ifndef WIN32 #include #include #include #include #include #define SOCKETTYPE int #define SOCKETFAIL(a) ((a) < 0) #define INVSOCK -1 #define CLOSESOCKET close #define SOCKETINIT do{}while(0) #define SOCKERRMSG strerror(errno) #else #include #define SOCKETTYPE SOCKET #define SOCKETFAIL(a) ((a) == SOCKET_ERROR) #define INVSOCK INVALID_SOCKET #define CLOSESOCKET closesocket static char WSAbuf[1024]; struct WSAERRORS { int id; char *code; } WSAErrors[] = { { 0, "No error" }, { WSAEINTR, "Interrupted system call" }, { WSAEBADF, "Bad file number" }, { WSAEACCES, "Permission denied" }, { WSAEFAULT, "Bad address" }, { WSAEINVAL, "Invalid argument" }, { WSAEMFILE, "Too many open sockets" }, { WSAEWOULDBLOCK, "Operation would block" }, { WSAEINPROGRESS, "Operation now in progress" }, { WSAEALREADY, "Operation already in progress" }, { WSAENOTSOCK, "Socket operation on non-socket" }, { WSAEDESTADDRREQ, "Destination address required" }, { WSAEMSGSIZE, "Message too long" }, { WSAEPROTOTYPE, "Protocol wrong type for socket" }, { WSAENOPROTOOPT, "Bad protocol option" }, { WSAEPROTONOSUPPORT, "Protocol not supported" }, { WSAESOCKTNOSUPPORT, "Socket type not supported" }, { WSAEOPNOTSUPP, "Operation not supported on socket" }, { WSAEPFNOSUPPORT, "Protocol family not supported" }, { WSAEAFNOSUPPORT, "Address family not supported" }, { WSAEADDRINUSE, "Address already in use" }, { WSAEADDRNOTAVAIL, "Can't assign requested address" }, { WSAENETDOWN, "Network is down" }, { WSAENETUNREACH, "Network is unreachable" }, { WSAENETRESET, "Net connection reset" }, { WSAECONNABORTED, "Software caused connection abort" }, { WSAECONNRESET, "Connection reset by peer" }, { WSAENOBUFS, "No buffer space available" }, { WSAEISCONN, "Socket is already connected" }, { WSAENOTCONN, "Socket is not connected" }, { WSAESHUTDOWN, "Can't send after socket shutdown" }, { WSAETOOMANYREFS, "Too many references, can't splice" }, { WSAETIMEDOUT, "Connection timed out" }, { WSAECONNREFUSED, "Connection refused" }, { WSAELOOP, "Too many levels of symbolic links" }, { WSAENAMETOOLONG, "File name too long" }, { WSAEHOSTDOWN, "Host is down" }, { WSAEHOSTUNREACH, "No route to host" }, { WSAENOTEMPTY, "Directory not empty" }, { WSAEPROCLIM, "Too many processes" }, { WSAEUSERS, "Too many users" }, { WSAEDQUOT, "Disc quota exceeded" }, { WSAESTALE, "Stale NFS file handle" }, { WSAEREMOTE, "Too many levels of remote in path" }, { WSASYSNOTREADY, "Network system is unavailable" }, { WSAVERNOTSUPPORTED, "Winsock version out of range" }, { WSANOTINITIALISED, "WSAStartup not yet called" }, { WSAEDISCON, "Graceful shutdown in progress" }, { WSAHOST_NOT_FOUND, "Host not found" }, { WSANO_DATA, "No host data of that type was found" }, { -1, "Unknown error code" } }; static char *WSAErrorMsg() { int i; int id = WSAGetLastError(); /* Assume none of them are actually -1 */ for (i = 0; WSAErrors[i].id != -1; i++) if (WSAErrors[i].id == id) break; sprintf(WSAbuf, "Socket Error: (%d) %s", id, WSAErrors[i].code); return &(WSAbuf[0]); } #define SOCKERRMSG WSAErrorMsg() static WSADATA WSA_Data; #define SOCKETINIT do { \ int wsa; \ if ( (wsa = WSAStartup(0x0202, &WSA_Data)) ) { \ printf("Socket startup failed: %d\n", wsa); \ return 1; \ } \ } while (0) #ifndef SHUT_RDWR #define SHUT_RDWR SD_BOTH #endif #endif #define RECVSIZE 65500 static const char SEPARATOR = '|'; static const char COMMA = ','; static const char EQ = '='; static int ONLY; 
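/* The plain-text replies parsed by display() below are a sequence of objects
 * separated by SEPARATOR ('|'); each object is a comma-separated list of
 * name=value items, and the first item names the section, optionally carrying
 * a numeric id.  An illustrative (not verbatim) reply to the "devs" command
 * might look roughly like:
 *
 *   STATUS=S,When=1386000000,Code=9,Msg=1 GPU(s),Description=bfgminer|GPU=0,Enabled=Y,Status=Alive,Temperature=71.0|
 *
 * The exact section and field names depend on the command and miner version. */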
void display(char *buf) { char *nextobj, *item, *nextitem, *eq; int itemcount; while (buf != NULL) { nextobj = strchr(buf, SEPARATOR); if (nextobj != NULL) *(nextobj++) = '\0'; if (*buf) { item = buf; itemcount = 0; while (item != NULL) { nextitem = strchr(item, COMMA); if (nextitem != NULL) *(nextitem++) = '\0'; if (*item) { eq = strchr(item, EQ); if (eq != NULL) *(eq++) = '\0'; if (itemcount == 0) printf("[%s%s] =>\n(\n", item, (eq != NULL && isdigit(*eq)) ? eq : ""); if (eq != NULL) printf(" [%s] => %s\n", item, eq); else printf(" [%d] => %s\n", itemcount, item); } item = nextitem; itemcount++; } if (itemcount > 0) puts(")"); } buf = nextobj; } } int callapi(char *command, char *host, short int port) { size_t bufsz = RECVSIZE; char *buf = malloc(bufsz+1); struct hostent *ip; struct sockaddr_in serv; SOCKETTYPE sock; int ret = 0; int n, p; assert(buf); SOCKETINIT; ip = gethostbyname(host); if (!ip) { printf("Failed to resolve host %s\n", host); return 1; } sock = socket(AF_INET, SOCK_STREAM, 0); if (sock == INVSOCK) { printf("Socket initialisation failed: %s\n", SOCKERRMSG); return 1; } memset(&serv, 0, sizeof(serv)); serv.sin_family = AF_INET; serv.sin_addr = *((struct in_addr *)ip->h_addr); serv.sin_port = htons(port); if (SOCKETFAIL(connect(sock, (struct sockaddr *)&serv, sizeof(struct sockaddr)))) { printf("Socket connect failed: %s\n", SOCKERRMSG); return 1; } n = send(sock, command, strlen(command), 0); if (SOCKETFAIL(n)) { printf("Send failed: %s\n", SOCKERRMSG); ret = 1; } else { p = 0; buf[0] = '\0'; while (true) { if (bufsz < RECVSIZE + p) { bufsz *= 2; buf = realloc(buf, bufsz); assert(buf); } n = recv(sock, &buf[p], RECVSIZE, 0); if (SOCKETFAIL(n)) { printf("Recv failed: %s\n", SOCKERRMSG); ret = 1; break; } if (n == 0) break; p += n; buf[p] = '\0'; } if (!ONLY) printf("Reply was '%s'\n", buf); else printf("%s\n", buf); if (!ONLY) display(buf); } CLOSESOCKET(sock); return ret; } static char *trim(char *str) { char *ptr; while (isspace(*str)) str++; ptr = strchr(str, '\0'); while (ptr-- > str) { if (isspace(*ptr)) *ptr = '\0'; } return str; } int main(int argc, char *argv[]) { char *command = "summary"; char *host = "127.0.0.1"; short int port = 4028; char *ptr; int i = 1; if (argc > 1) if (strcmp(argv[1], "-?") == 0 || strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0) { fprintf(stderr, "Usage: %s [command [ip/host [port]]]\n", argv[0]); return 1; } if (argc > 1) if (strcmp(argv[1], "-o") == 0) { ONLY = 1; i = 2; } if (argc > i) { ptr = trim(argv[i++]); if (strlen(ptr) > 0) command = ptr; } if (argc > i) { ptr = trim(argv[i++]); if (strlen(ptr) > 0) host = ptr; } if (argc > i) { ptr = trim(argv[i]); if (strlen(ptr) > 0) port = atoi(ptr); } return callapi(command, host, port); } bfgminer-bfgminer-3.10.0/api-example.php000066400000000000000000000040111226556647300201020ustar00rootroot00000000000000 0) { $items = explode(',', $obj); $item = $items[0]; $id = explode('=', $items[0], 2); if (count($id) == 1 or !ctype_digit($id[1])) $name = $id[0]; else $name = $id[0].$id[1]; if (strlen($name) == 0) $name = 'null'; if (isset($data[$name])) { $num = 1; while (isset($data[$name.$num])) $num++; $name .= $num; } $counter = 0; foreach ($items as $item) { $id = explode('=', $item, 2); if (count($id) == 2) $data[$name][$id[0]] = $id[1]; else $data[$name][$counter] = $id[0]; $counter++; } } } return $data; } return null; } # if (isset($argv) and count($argv) > 1) $r = request($argv[1]); else $r = request('summary'); # echo print_r($r, true)."\n"; # ?> 
bfgminer-bfgminer-3.10.0/api-example.py000077500000000000000000000027461226556647300177630ustar00rootroot00000000000000#!/usr/bin/python # Copyright 2013 Christian Berendt # Copyright 2013 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 3 of the License, or (at your option) any later # version. See COPYING for more details. import argparse import json import logging import pprint import socket logging.basicConfig( format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG ) parser = argparse.ArgumentParser() parser.add_argument("command", default="summary", nargs='?') parser.add_argument("parameter", default="", nargs='?') parser.add_argument("--hostname", default="localhost") parser.add_argument("--port", type=int, default=4028) args = parser.parse_args() s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((args.hostname, args.port)) except socket.error, e: logging.error(e) try: s.send("{\"command\" : \"%s\", \"parameter\" : \"%s\"}" % (args.command, args.parameter) ) except socket.error, e: logging.error(e) data = '' while True: try: newdata = s.recv(1024) if newdata: data += newdata else: break except socket.error, e: break try: s.close() except socket.error,e: logging.error(e) if data: data = json.loads(data.replace('\x00', '')) pp = pprint.PrettyPrinter() pp.pprint(data) bfgminer-bfgminer-3.10.0/api.c000066400000000000000000003377341226556647300161310ustar00rootroot00000000000000/* * Copyright 2011-2013 Andrew Smith * Copyright 2011-2013 Con Kolivas * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. * * Note: the code always includes GPU support even if there are no GPUs * this simplifies handling multiple other device code being included * depending on compile options */ #define _MEMORY_DEBUG_MASTER 1 #include "config.h" #include #include #include #include #include #include #include #include #include "compat.h" #include "deviceapi.h" #ifdef USE_LIBMICROHTTPD #include "httpsrv.h" #endif #include "miner.h" #include "util.h" #include "driver-cpu.h" /* for algo_names[], TODO: re-factor dependency */ #define HAVE_AN_FPGA 1 // Big enough for largest API request // though a PC with 100s of PGAs/CPUs may exceed the size ... 
// data is truncated at the end of the last record that fits // but still closed correctly for JSON // Current code assumes it can socket send this size + JSON_CLOSE + JSON_END #define SOCKBUFSIZ 65432 // BUFSIZ varies on Windows and Linux #define TMPBUFSIZ 8192 // Number of requests to queue - normally would be small // However lots of PGA's may mean more #define QUEUE 100 static const char *UNAVAILABLE = " - API will not be available"; static const char *MUNAVAILABLE = " - API multicast listener will not be available"; static const char *BLANK = ""; static const char *COMMA = ","; #define COMSTR "," static const char SEPARATOR = '|'; #define SEPSTR "|" static const char GPUSEP = ','; static const char *APIVERSION = "2.3"; static const char *DEAD = "Dead"; static const char *SICK = "Sick"; static const char *NOSTART = "NoStart"; static const char *INIT = "Initialising"; static const char *WAIT = "Waiting"; static const char *DISABLED = "Disabled"; static const char *ALIVE = "Alive"; static const char *REJECTING = "Rejecting"; static const char *UNKNOWN = "Unknown"; #define _DYNAMIC "D" #ifdef HAVE_OPENCL static const char *DYNAMIC = _DYNAMIC; #endif static const char *YES = "Y"; static const char *NO = "N"; static const char *NULLSTR = "(null)"; static const char *TRUESTR = "true"; static const char *FALSESTR = "false"; #ifdef USE_SCRYPT static const char *SCRYPTSTR = "scrypt"; #endif static const char *SHA256STR = "sha256"; static const char *OSINFO = #if defined(__linux) "Linux"; #else #if defined(__APPLE__) "Apple"; #else #if defined (__CYGWIN__) "Cygwin"; #elif defined (WIN32) "Windows"; #else #if defined(unix) "Unix"; #else "Unknown"; #endif #endif #endif #endif #define _DEVS "DEVS" #define _POOLS "POOLS" #define _SUMMARY "SUMMARY" #define _STATUS "STATUS" #define _VERSION "VERSION" #define _MINECONFIG "CONFIG" #define _GPU "GPU" #ifdef HAVE_AN_FPGA #define _PGA "PGA" #endif #ifdef WANT_CPUMINE #define _CPU "CPU" #endif #define _GPUS "GPUS" #define _PGAS "PGAS" #define _CPUS "CPUS" #define _NOTIFY "NOTIFY" #define _DEVDETAILS "DEVDETAILS" #define _BYE "BYE" #define _RESTART "RESTART" #define _MINESTATS "STATS" #define _CHECK "CHECK" #define _MINECOIN "COIN" #define _DEBUGSET "DEBUG" #define _SETCONFIG "SETCONFIG" static const char ISJSON = '{'; #define JSON0 "{" #define JSON1 "\"" #define JSON2 "\":[" #define JSON3 "]" #define JSON4 ",\"id\":1" // If anyone cares, id=0 for truncated output #define JSON4_TRUNCATED ",\"id\":0" #define JSON5 "}" #define JSON_START JSON0 #define JSON_DEVS JSON1 _DEVS JSON2 #define JSON_POOLS JSON1 _POOLS JSON2 #define JSON_SUMMARY JSON1 _SUMMARY JSON2 #define JSON_STATUS JSON1 _STATUS JSON2 #define JSON_VERSION JSON1 _VERSION JSON2 #define JSON_MINECONFIG JSON1 _MINECONFIG JSON2 #define JSON_GPU JSON1 _GPU JSON2 #ifdef HAVE_AN_FPGA #define JSON_PGA JSON1 _PGA JSON2 #endif #ifdef WANT_CPUMINE #define JSON_CPU JSON1 _CPU JSON2 #endif #define JSON_GPUS JSON1 _GPUS JSON2 #define JSON_PGAS JSON1 _PGAS JSON2 #define JSON_CPUS JSON1 _CPUS JSON2 #define JSON_NOTIFY JSON1 _NOTIFY JSON2 #define JSON_DEVDETAILS JSON1 _DEVDETAILS JSON2 #define JSON_BYE JSON1 _BYE JSON1 #define JSON_RESTART JSON1 _RESTART JSON1 #define JSON_CLOSE JSON3 #define JSON_MINESTATS JSON1 _MINESTATS JSON2 #define JSON_CHECK JSON1 _CHECK JSON2 #define JSON_MINECOIN JSON1 _MINECOIN JSON2 #define JSON_DEBUGSET JSON1 _DEBUGSET JSON2 #define JSON_SETCONFIG JSON1 _SETCONFIG JSON2 #define JSON_END JSON4 JSON5 #define JSON_END_TRUNCATED JSON4_TRUNCATED JSON5 static const char *JSON_COMMAND = 
"command"; static const char *JSON_PARAMETER = "parameter"; #define MSG_INVGPU 1 #define MSG_ALRENA 2 #define MSG_ALRDIS 3 #define MSG_GPUMRE 4 #define MSG_GPUREN 5 #define MSG_GPUNON 6 #define MSG_POOL 7 #define MSG_NOPOOL 8 #define MSG_DEVS 9 #define MSG_NODEVS 10 #define MSG_SUMM 11 #define MSG_GPUDIS 12 #define MSG_GPUREI 13 #define MSG_INVCMD 14 #define MSG_MISID 15 #define MSG_GPUDEV 17 #ifdef WANT_CPUMINE #define MSG_CPUNON 16 #define MSG_CPUDEV 18 #define MSG_INVCPU 19 #define MSG_ALRENAC 98 #define MSG_ALRDISC 99 #define MSG_CPUMRE 100 #define MSG_CPUREN 101 #define MSG_CPUDIS 102 #define MSG_CPUREI 103 #endif #define MSG_NUMGPU 20 #define MSG_NUMCPU 21 #define MSG_VERSION 22 #define MSG_INVJSON 23 #define MSG_MISCMD 24 #define MSG_MISPID 25 #define MSG_INVPID 26 #define MSG_SWITCHP 27 #define MSG_MISVAL 28 #define MSG_NOADL 29 #define MSG_NOGPUADL 30 #define MSG_INVINT 31 #define MSG_GPUINT 32 #define MSG_MINECONFIG 33 #define MSG_GPUMERR 34 #define MSG_GPUMEM 35 #define MSG_GPUEERR 36 #define MSG_GPUENG 37 #define MSG_GPUVERR 38 #define MSG_GPUVDDC 39 #define MSG_GPUFERR 40 #define MSG_GPUFAN 41 #define MSG_MISFN 42 #define MSG_BADFN 43 #define MSG_SAVED 44 #define MSG_ACCDENY 45 #define MSG_ACCOK 46 #define MSG_ENAPOOL 47 #define MSG_DISPOOL 48 #define MSG_ALRENAP 49 #define MSG_ALRDISP 50 #define MSG_DISLASTP 51 #define MSG_MISPDP 52 #define MSG_INVPDP 53 #define MSG_TOOMANYP 54 #define MSG_ADDPOOL 55 #ifdef HAVE_AN_FPGA #define MSG_PGANON 56 #define MSG_PGADEV 57 #define MSG_INVPGA 58 #endif #define MSG_NUMPGA 59 #define MSG_NOTIFY 60 #ifdef HAVE_AN_FPGA #define MSG_PGALRENA 61 #define MSG_PGALRDIS 62 #define MSG_PGAENA 63 #define MSG_PGADIS 64 #define MSG_PGAUNW 65 #endif #define MSG_REMLASTP 66 #define MSG_ACTPOOL 67 #define MSG_REMPOOL 68 #define MSG_DEVDETAILS 69 #define MSG_MINESTATS 70 #define MSG_MISCHK 71 #define MSG_CHECK 72 #define MSG_POOLPRIO 73 #define MSG_DUPPID 74 #define MSG_MISBOOL 75 #define MSG_INVBOOL 76 #define MSG_FOO 77 #define MSG_MINECOIN 78 #define MSG_DEBUGSET 79 #define MSG_PGAIDENT 80 #define MSG_PGANOID 81 #define MSG_SETCONFIG 82 #define MSG_UNKCON 83 #define MSG_INVNUM 84 #define MSG_CONPAR 85 #define MSG_CONVAL 86 #ifdef HAVE_AN_FPGA #define MSG_MISPGAOPT 89 #define MSG_PGANOSET 90 #define MSG_PGAHELP 91 #define MSG_PGASETOK 92 #define MSG_PGASETERR 93 #endif #define MSG_ZERMIS 94 #define MSG_ZERINV 95 #define MSG_ZERSUM 96 #define MSG_ZERNOSUM 97 #define MSG_DEVSCAN 0x100 #define MSG_INVNEG 121 #define MSG_SETQUOTA 122 enum code_severity { SEVERITY_ERR, SEVERITY_WARN, SEVERITY_INFO, SEVERITY_SUCC, SEVERITY_FAIL }; enum code_parameters { PARAM_COUNT, PARAM_GPU, PARAM_PGA, PARAM_CPU, PARAM_PID, PARAM_GPUMAX, PARAM_PGAMAX, PARAM_CPUMAX, PARAM_PMAX, PARAM_POOLMAX, // Single generic case: have the code resolve it - see below PARAM_DMAX, PARAM_CMD, PARAM_POOL, PARAM_STR, PARAM_BOTH, PARAM_BOOL, PARAM_SET, PARAM_NONE }; struct CODES { const enum code_severity severity; const int code; const enum code_parameters params; const char *description; } codes[] = { #ifdef HAVE_OPENCL { SEVERITY_ERR, MSG_INVGPU, PARAM_GPUMAX, "Invalid GPU id %d - range is 0 - %d" }, { SEVERITY_INFO, MSG_ALRENA, PARAM_GPU, "GPU %d already enabled" }, { SEVERITY_INFO, MSG_ALRDIS, PARAM_GPU, "GPU %d already disabled" }, { SEVERITY_WARN, MSG_GPUMRE, PARAM_GPU, "GPU %d must be restarted first" }, { SEVERITY_INFO, MSG_GPUREN, PARAM_GPU, "GPU %d sent enable message" }, #endif { SEVERITY_ERR, MSG_GPUNON, PARAM_NONE, "No GPUs" }, { SEVERITY_SUCC, MSG_POOL, PARAM_PMAX, "%d Pool(s)" }, 
{ SEVERITY_ERR, MSG_NOPOOL, PARAM_NONE, "No pools" }, { SEVERITY_SUCC, MSG_DEVS, PARAM_DMAX, #ifdef HAVE_OPENCL "%d GPU(s)" #endif #if defined(HAVE_AN_FPGA) && defined(HAVE_OPENCL) " - " #endif #ifdef HAVE_AN_FPGA "%d PGA(s)" #endif #if defined(WANT_CPUMINE) && (defined(HAVE_OPENCL) || defined(HAVE_AN_FPGA)) " - " #endif #ifdef WANT_CPUMINE "%d CPU(s)" #endif }, { SEVERITY_ERR, MSG_NODEVS, PARAM_NONE, "No GPUs" #ifdef HAVE_AN_FPGA "/PGAs" #endif #ifdef WANT_CPUMINE "/CPUs" #endif }, { SEVERITY_SUCC, MSG_SUMM, PARAM_NONE, "Summary" }, #ifdef HAVE_OPENCL { SEVERITY_INFO, MSG_GPUDIS, PARAM_GPU, "GPU %d set disable flag" }, { SEVERITY_INFO, MSG_GPUREI, PARAM_GPU, "GPU %d restart attempted" }, #endif { SEVERITY_ERR, MSG_INVCMD, PARAM_NONE, "Invalid command" }, { SEVERITY_ERR, MSG_MISID, PARAM_NONE, "Missing device id parameter" }, #ifdef HAVE_OPENCL { SEVERITY_SUCC, MSG_GPUDEV, PARAM_GPU, "GPU%d" }, #endif #ifdef HAVE_AN_FPGA { SEVERITY_ERR, MSG_PGANON, PARAM_NONE, "No PGAs" }, { SEVERITY_SUCC, MSG_PGADEV, PARAM_PGA, "PGA%d" }, { SEVERITY_ERR, MSG_INVPGA, PARAM_PGAMAX, "Invalid PGA id %d - range is 0 - %d" }, { SEVERITY_INFO, MSG_PGALRENA,PARAM_PGA, "PGA %d already enabled" }, { SEVERITY_INFO, MSG_PGALRDIS,PARAM_PGA, "PGA %d already disabled" }, { SEVERITY_INFO, MSG_PGAENA, PARAM_PGA, "PGA %d sent enable message" }, { SEVERITY_INFO, MSG_PGADIS, PARAM_PGA, "PGA %d set disable flag" }, { SEVERITY_ERR, MSG_PGAUNW, PARAM_PGA, "PGA %d is not flagged WELL, cannot enable" }, #endif #ifdef WANT_CPUMINE { SEVERITY_ERR, MSG_CPUNON, PARAM_NONE, "No CPUs" }, { SEVERITY_SUCC, MSG_CPUDEV, PARAM_CPU, "CPU%d" }, { SEVERITY_ERR, MSG_INVCPU, PARAM_CPUMAX, "Invalid CPU id %d - range is 0 - %d" }, { SEVERITY_INFO, MSG_ALRENAC, PARAM_CPU, "CPU %d already enabled" }, { SEVERITY_INFO, MSG_ALRDISC, PARAM_CPU, "CPU %d already disabled" }, { SEVERITY_WARN, MSG_CPUMRE, PARAM_CPU, "CPU %d must be restarted first" }, { SEVERITY_INFO, MSG_CPUREN, PARAM_CPU, "CPU %d sent enable message" }, { SEVERITY_INFO, MSG_CPUDIS, PARAM_CPU, "CPU %d set disable flag" }, { SEVERITY_INFO, MSG_CPUREI, PARAM_CPU, "CPU %d restart attempted" }, #endif { SEVERITY_SUCC, MSG_NUMGPU, PARAM_NONE, "GPU count" }, { SEVERITY_SUCC, MSG_NUMPGA, PARAM_NONE, "PGA count" }, { SEVERITY_SUCC, MSG_NUMCPU, PARAM_NONE, "CPU count" }, { SEVERITY_SUCC, MSG_VERSION, PARAM_NONE, "BFGMiner versions" }, { SEVERITY_ERR, MSG_INVJSON, PARAM_NONE, "Invalid JSON" }, { SEVERITY_ERR, MSG_MISCMD, PARAM_CMD, "Missing JSON '%s'" }, { SEVERITY_ERR, MSG_MISPID, PARAM_NONE, "Missing pool id parameter" }, { SEVERITY_ERR, MSG_INVPID, PARAM_POOLMAX, "Invalid pool id %d - range is 0 - %d" }, { SEVERITY_SUCC, MSG_SWITCHP, PARAM_POOL, "Switching to pool %d:'%s'" }, { SEVERITY_ERR, MSG_MISVAL, PARAM_NONE, "Missing comma after GPU number" }, { SEVERITY_ERR, MSG_NOADL, PARAM_NONE, "ADL is not available" }, { SEVERITY_ERR, MSG_NOGPUADL,PARAM_GPU, "GPU %d does not have ADL" }, { SEVERITY_ERR, MSG_INVINT, PARAM_STR, "Invalid intensity (%s) - must be '" _DYNAMIC "' or range " MIN_SHA_INTENSITY_STR " - " MAX_SCRYPT_INTENSITY_STR }, { SEVERITY_INFO, MSG_GPUINT, PARAM_BOTH, "GPU %d set new intensity to %s" }, { SEVERITY_SUCC, MSG_MINECONFIG,PARAM_NONE, "BFGMiner config" }, #ifdef HAVE_OPENCL { SEVERITY_ERR, MSG_GPUMERR, PARAM_BOTH, "Setting GPU %d memoryclock to (%s) reported failure" }, { SEVERITY_SUCC, MSG_GPUMEM, PARAM_BOTH, "Setting GPU %d memoryclock to (%s) reported success" }, { SEVERITY_ERR, MSG_GPUEERR, PARAM_BOTH, "Setting GPU %d clock to (%s) reported failure" }, { SEVERITY_SUCC, 
MSG_GPUENG, PARAM_BOTH, "Setting GPU %d clock to (%s) reported success" }, { SEVERITY_ERR, MSG_GPUVERR, PARAM_BOTH, "Setting GPU %d vddc to (%s) reported failure" }, { SEVERITY_SUCC, MSG_GPUVDDC, PARAM_BOTH, "Setting GPU %d vddc to (%s) reported success" }, { SEVERITY_ERR, MSG_GPUFERR, PARAM_BOTH, "Setting GPU %d fan to (%s) reported failure" }, { SEVERITY_SUCC, MSG_GPUFAN, PARAM_BOTH, "Setting GPU %d fan to (%s) reported success" }, #endif { SEVERITY_ERR, MSG_MISFN, PARAM_NONE, "Missing save filename parameter" }, { SEVERITY_ERR, MSG_BADFN, PARAM_STR, "Can't open or create save file '%s'" }, { SEVERITY_SUCC, MSG_SAVED, PARAM_STR, "Configuration saved to file '%s'" }, { SEVERITY_ERR, MSG_ACCDENY, PARAM_STR, "Access denied to '%s' command" }, { SEVERITY_SUCC, MSG_ACCOK, PARAM_NONE, "Privileged access OK" }, { SEVERITY_SUCC, MSG_ENAPOOL, PARAM_POOL, "Enabling pool %d:'%s'" }, { SEVERITY_SUCC, MSG_POOLPRIO,PARAM_NONE, "Changed pool priorities" }, { SEVERITY_ERR, MSG_DUPPID, PARAM_PID, "Duplicate pool specified %d" }, { SEVERITY_SUCC, MSG_DISPOOL, PARAM_POOL, "Disabling pool %d:'%s'" }, { SEVERITY_INFO, MSG_ALRENAP, PARAM_POOL, "Pool %d:'%s' already enabled" }, { SEVERITY_INFO, MSG_ALRDISP, PARAM_POOL, "Pool %d:'%s' already disabled" }, { SEVERITY_ERR, MSG_DISLASTP,PARAM_POOL, "Cannot disable last active pool %d:'%s'" }, { SEVERITY_ERR, MSG_MISPDP, PARAM_NONE, "Missing addpool details" }, { SEVERITY_ERR, MSG_INVPDP, PARAM_STR, "Invalid addpool details '%s'" }, { SEVERITY_ERR, MSG_TOOMANYP,PARAM_NONE, "Reached maximum number of pools (%d)" }, { SEVERITY_SUCC, MSG_ADDPOOL, PARAM_STR, "Added pool '%s'" }, { SEVERITY_ERR, MSG_REMLASTP,PARAM_POOL, "Cannot remove last pool %d:'%s'" }, { SEVERITY_ERR, MSG_ACTPOOL, PARAM_POOL, "Cannot remove active pool %d:'%s'" }, { SEVERITY_SUCC, MSG_REMPOOL, PARAM_BOTH, "Removed pool %d:'%s'" }, { SEVERITY_SUCC, MSG_NOTIFY, PARAM_NONE, "Notify" }, { SEVERITY_SUCC, MSG_DEVDETAILS,PARAM_NONE, "Device Details" }, { SEVERITY_SUCC, MSG_MINESTATS,PARAM_NONE, "BFGMiner stats" }, { SEVERITY_ERR, MSG_MISCHK, PARAM_NONE, "Missing check cmd" }, { SEVERITY_SUCC, MSG_CHECK, PARAM_NONE, "Check command" }, { SEVERITY_ERR, MSG_MISBOOL, PARAM_NONE, "Missing parameter: true/false" }, { SEVERITY_ERR, MSG_INVBOOL, PARAM_NONE, "Invalid parameter should be true or false" }, { SEVERITY_SUCC, MSG_FOO, PARAM_BOOL, "Failover-Only set to %s" }, { SEVERITY_SUCC, MSG_MINECOIN,PARAM_NONE, "BFGMiner coin" }, { SEVERITY_SUCC, MSG_DEBUGSET,PARAM_NONE, "Debug settings" }, #ifdef HAVE_AN_FPGA { SEVERITY_SUCC, MSG_PGAIDENT,PARAM_PGA, "Identify command sent to PGA%d" }, { SEVERITY_WARN, MSG_PGANOID, PARAM_PGA, "PGA%d does not support identify" }, #endif { SEVERITY_SUCC, MSG_SETCONFIG,PARAM_SET, "Set config '%s' to %d" }, { SEVERITY_ERR, MSG_UNKCON, PARAM_STR, "Unknown config '%s'" }, { SEVERITY_ERR, MSG_INVNUM, PARAM_BOTH, "Invalid number (%d) for '%s' range is 0-9999" }, { SEVERITY_ERR, MSG_INVNEG, PARAM_BOTH, "Invalid negative number (%d) for '%s'" }, { SEVERITY_SUCC, MSG_SETQUOTA,PARAM_SET, "Set pool '%s' to quota %d'" }, { SEVERITY_ERR, MSG_CONPAR, PARAM_NONE, "Missing config parameters 'name,N'" }, { SEVERITY_ERR, MSG_CONVAL, PARAM_STR, "Missing config value N for '%s,N'" }, #ifdef HAVE_AN_FPGA { SEVERITY_ERR, MSG_MISPGAOPT, PARAM_NONE, "Missing option after PGA number" }, { SEVERITY_WARN, MSG_PGANOSET, PARAM_PGA, "PGA %d does not support pgaset" }, { SEVERITY_INFO, MSG_PGAHELP, PARAM_BOTH, "PGA %d set help: %s" }, { SEVERITY_SUCC, MSG_PGASETOK, PARAM_BOTH, "PGA %d set OK" }, { SEVERITY_ERR, 
MSG_PGASETERR, PARAM_BOTH, "PGA %d set failed: %s" }, #endif { SEVERITY_ERR, MSG_ZERMIS, PARAM_NONE, "Missing zero parameters" }, { SEVERITY_ERR, MSG_ZERINV, PARAM_STR, "Invalid zero parameter '%s'" }, { SEVERITY_SUCC, MSG_ZERSUM, PARAM_STR, "Zeroed %s stats with summary" }, { SEVERITY_SUCC, MSG_ZERNOSUM, PARAM_STR, "Zeroed %s stats without summary" }, { SEVERITY_SUCC, MSG_DEVSCAN, PARAM_COUNT, "Added %d new device(s)" }, { SEVERITY_FAIL, 0, 0, NULL } }; static const char *localaddr = "127.0.0.1"; static int my_thr_id = 0; static bool bye; // Used to control quit restart access to shutdown variables static pthread_mutex_t quit_restart_lock; static bool do_a_quit; static bool do_a_restart; static time_t when = 0; // when the request occurred static bool per_proc; struct IP4ACCESS { in_addr_t ip; in_addr_t mask; char group; }; #define GROUP(g) (toupper(g)) #define PRIVGROUP GROUP('W') #define NOPRIVGROUP GROUP('R') #define ISPRIVGROUP(g) (GROUP(g) == PRIVGROUP) #define GROUPOFFSET(g) (GROUP(g) - GROUP('A')) #define VALIDGROUP(g) (GROUP(g) >= GROUP('A') && GROUP(g) <= GROUP('Z')) #define COMMANDS(g) (apigroups[GROUPOFFSET(g)].commands) #define DEFINEDGROUP(g) (ISPRIVGROUP(g) || COMMANDS(g) != NULL) struct APIGROUPS { // This becomes a string like: "|cmd1|cmd2|cmd3|" so it's quick to search char *commands; } apigroups['Z' - 'A' + 1]; // only A=0 to Z=25 (R: noprivs, W: allprivs) static struct IP4ACCESS *ipaccess = NULL; static int ips = 0; #ifdef HAVE_OPENCL extern struct device_drv opencl_api; #endif #ifdef WANT_CPUMINE extern struct device_drv cpu_drv; #endif struct io_data { bytes_t data; SOCKETTYPE sock; // Whether to add various things bool close; }; static struct io_data *rpc_io_data; static void io_reinit(struct io_data *io_data) { bytes_reset(&io_data->data); io_data->close = false; } static struct io_data *sock_io_new() { struct io_data *io_data = malloc(sizeof(struct io_data)); bytes_init(&io_data->data); io_data->sock = INVSOCK; io_reinit(io_data); return io_data; } static size_t io_flush(struct io_data *io_data, bool complete) { size_t sent = 0, tosend = bytes_len(&io_data->data); ssize_t n; struct timeval timeout = {0, complete ? 
50000: 0}, tv; fd_set wd; int count = 0; while (tosend) { FD_ZERO(&wd); FD_SET(io_data->sock, &wd); tv = timeout; if (select(io_data->sock + 1, NULL, &wd, NULL, &tv) < 1) break; n = send(io_data->sock, (void*)&bytes_buf(&io_data->data)[sent], tosend, 0); if (SOCKETFAIL(n)) { if (!sock_blocks()) applog(LOG_WARNING, "API: send (%lu) failed: %s", (unsigned long)tosend, SOCKERRMSG); break; } if (count <= 1) { if (n == tosend) applog(LOG_DEBUG, "API: sent all of %lu first go", (unsigned long)tosend); else applog(LOG_DEBUG, "API: sent %ld of %lu first go", (long)n, (unsigned long)tosend); } else { if (n == tosend) applog(LOG_DEBUG, "API: sent all of remaining %lu (count=%d)", (unsigned long)tosend, count); else applog(LOG_DEBUG, "API: sent %ld of remaining %lu (count=%d)", (long)n, (unsigned long)tosend, count); } sent += n; tosend -= n; } bytes_shift(&io_data->data, sent); return sent; } static bool io_add(struct io_data *io_data, char *buf) { size_t len = strlen(buf); if (bytes_len(&io_data->data) + len > SOCKBUFSIZ) io_flush(io_data, false); bytes_append(&io_data->data, buf, len); return true; } static bool io_put(struct io_data *io_data, char *buf) { bytes_reset(&io_data->data); return io_add(io_data, buf); } static void io_close(struct io_data *io_data) { io_data->close = true; } static void io_free() { bytes_free(&rpc_io_data->data); free(rpc_io_data); rpc_io_data = NULL; } // This is only called when expected to be needed (rarely) // i.e. strings outside of the codes control (input from the user) static char *escape_string(char *str, bool isjson) { char *buf, *ptr; int count; count = 0; for (ptr = str; *ptr; ptr++) { switch (*ptr) { case ',': case '|': case '=': if (!isjson) count++; break; case '"': if (isjson) count++; break; case '\\': count++; break; } } if (count == 0) return str; buf = malloc(strlen(str) + count + 1); if (unlikely(!buf)) quit(1, "Failed to malloc escape buf"); ptr = buf; while (*str) switch (*str) { case ',': case '|': case '=': if (!isjson) *(ptr++) = '\\'; *(ptr++) = *(str++); break; case '"': if (isjson) *(ptr++) = '\\'; *(ptr++) = *(str++); break; case '\\': *(ptr++) = '\\'; *(ptr++) = *(str++); break; default: *(ptr++) = *(str++); break; } *ptr = '\0'; return buf; } static struct api_data *api_add_extra(struct api_data *root, struct api_data *extra) { struct api_data *tmp; if (root) { if (extra) { // extra tail tmp = extra->prev; // extra prev = root tail extra->prev = root->prev; // root tail next = extra root->prev->next = extra; // extra tail next = root tmp->next = root; // root prev = extra tail root->prev = tmp; } } else root = extra; return root; } static struct api_data *api_add_data_full(struct api_data *root, char *name, enum api_data_type type, void *data, bool copy_data) { struct api_data *api_data; api_data = (struct api_data *)malloc(sizeof(struct api_data)); api_data->name = strdup(name); api_data->type = type; if (root == NULL) { root = api_data; root->prev = root; root->next = root; } else { api_data->prev = root->prev; root->prev = api_data; api_data->next = root; api_data->prev->next = api_data; } api_data->data_was_malloc = copy_data; // Avoid crashing on bad data if (data == NULL) { api_data->type = type = API_CONST; data = (void *)NULLSTR; api_data->data_was_malloc = copy_data = false; } if (!copy_data) { api_data->data = data; if (type == API_JSON) json_incref((json_t *)data); } else switch(type) { case API_ESCAPE: case API_STRING: case API_CONST: api_data->data = (void *)malloc(strlen((char *)data) + 1); strcpy((char*)(api_data->data), 
(char *)data); break; case API_UINT8: /* Most OSs won't really alloc less than 4 */ api_data->data = malloc(4); *(uint8_t *)api_data->data = *(uint8_t *)data; break; case API_UINT16: /* Most OSs won't really alloc less than 4 */ api_data->data = malloc(4); *(uint16_t *)api_data->data = *(uint16_t *)data; break; case API_INT: api_data->data = (void *)malloc(sizeof(int)); *((int *)(api_data->data)) = *((int *)data); break; case API_UINT: api_data->data = (void *)malloc(sizeof(unsigned int)); *((unsigned int *)(api_data->data)) = *((unsigned int *)data); break; case API_UINT32: api_data->data = (void *)malloc(sizeof(uint32_t)); *((uint32_t *)(api_data->data)) = *((uint32_t *)data); break; case API_UINT64: api_data->data = (void *)malloc(sizeof(uint64_t)); *((uint64_t *)(api_data->data)) = *((uint64_t *)data); break; case API_DOUBLE: case API_ELAPSED: case API_MHS: case API_MHTOTAL: case API_UTILITY: case API_FREQ: case API_HS: case API_DIFF: case API_PERCENT: api_data->data = (void *)malloc(sizeof(double)); *((double *)(api_data->data)) = *((double *)data); break; case API_BOOL: api_data->data = (void *)malloc(sizeof(bool)); *((bool *)(api_data->data)) = *((bool *)data); break; case API_TIMEVAL: api_data->data = (void *)malloc(sizeof(struct timeval)); memcpy(api_data->data, data, sizeof(struct timeval)); break; case API_TIME: api_data->data = (void *)malloc(sizeof(time_t)); *(time_t *)(api_data->data) = *((time_t *)data); break; case API_VOLTS: case API_TEMP: api_data->data = (void *)malloc(sizeof(float)); *((float *)(api_data->data)) = *((float *)data); break; case API_JSON: api_data->data_was_malloc = false; api_data->data = (void *)json_deep_copy((json_t *)data); break; default: applog(LOG_ERR, "API: unknown1 data type %d ignored", type); api_data->type = API_STRING; api_data->data_was_malloc = false; api_data->data = (void *)UNKNOWN; break; } return root; } struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data) { return api_add_data_full(root, name, API_ESCAPE, (void *)data, copy_data); } struct api_data *api_add_string(struct api_data *root, char *name, const char *data, bool copy_data) { return api_add_data_full(root, name, API_STRING, (void *)data, copy_data); } struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data) { return api_add_data_full(root, name, API_CONST, (void *)data, copy_data); } struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data) { return api_add_data_full(root, name, API_UINT8, (void *)data, copy_data); } struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data) { return api_add_data_full(root, name, API_UINT16, (void *)data, copy_data); } struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data) { return api_add_data_full(root, name, API_INT, (void *)data, copy_data); } struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data) { return api_add_data_full(root, name, API_UINT, (void *)data, copy_data); } struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data) { return api_add_data_full(root, name, API_UINT32, (void *)data, copy_data); } struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data) { return api_add_data_full(root, name, API_UINT64, (void *)data, copy_data); } struct api_data *api_add_double(struct api_data *root, char *name, double 
*data, bool copy_data) { return api_add_data_full(root, name, API_DOUBLE, (void *)data, copy_data); } struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_ELAPSED, (void *)data, copy_data); } struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data) { return api_add_data_full(root, name, API_BOOL, (void *)data, copy_data); } struct api_data *api_add_timeval(struct api_data *root, char *name, struct timeval *data, bool copy_data) { return api_add_data_full(root, name, API_TIMEVAL, (void *)data, copy_data); } struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data) { return api_add_data_full(root, name, API_TIME, (void *)data, copy_data); } struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_MHS, (void *)data, copy_data); } struct api_data *api_add_mhtotal(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_MHTOTAL, (void *)data, copy_data); } struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data) { return api_add_data_full(root, name, API_TEMP, (void *)data, copy_data); } struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_UTILITY, (void *)data, copy_data); } struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_FREQ, (void *)data, copy_data); } struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data) { return api_add_data_full(root, name, API_VOLTS, (void *)data, copy_data); } struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_HS, (void *)data, copy_data); } struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_DIFF, (void *)data, copy_data); } struct api_data *api_add_json(struct api_data *root, char *name, json_t *data, bool copy_data) { return api_add_data_full(root, name, API_JSON, (void *)data, copy_data); } struct api_data *api_add_percent(struct api_data *root, char *name, double *data, bool copy_data) { return api_add_data_full(root, name, API_PERCENT, (void *)data, copy_data); } static struct api_data *print_data(struct api_data *root, char *buf, bool isjson, bool precom) { struct api_data *tmp; bool first = true; char *original, *escape; char *quote; *buf = '\0'; if (precom) { *(buf++) = *COMMA; *buf = '\0'; } if (isjson) { strcpy(buf, JSON0); buf = strchr(buf, '\0'); quote = JSON1; } else quote = (char *)BLANK; while (root) { if (!first) *(buf++) = *COMMA; else first = false; sprintf(buf, "%s%s%s%s", quote, root->name, quote, isjson ? 
":" : "="); buf = strchr(buf, '\0'); switch(root->type) { case API_STRING: case API_CONST: sprintf(buf, "%s%s%s", quote, (char *)(root->data), quote); break; case API_ESCAPE: original = (char *)(root->data); escape = escape_string((char *)(root->data), isjson); sprintf(buf, "%s%s%s", quote, escape, quote); if (escape != original) free(escape); break; case API_UINT8: sprintf(buf, "%u", *(uint8_t *)root->data); break; case API_UINT16: sprintf(buf, "%u", *(uint16_t *)root->data); break; case API_INT: sprintf(buf, "%d", *((int *)(root->data))); break; case API_UINT: sprintf(buf, "%u", *((unsigned int *)(root->data))); break; case API_UINT32: sprintf(buf, "%"PRIu32, *((uint32_t *)(root->data))); break; case API_UINT64: sprintf(buf, "%"PRIu64, *((uint64_t *)(root->data))); break; case API_TIME: sprintf(buf, "%lu", *((unsigned long *)(root->data))); break; case API_DOUBLE: sprintf(buf, "%f", *((double *)(root->data))); break; case API_ELAPSED: sprintf(buf, "%.0f", *((double *)(root->data))); break; case API_UTILITY: case API_FREQ: case API_MHS: sprintf(buf, "%.3f", *((double *)(root->data))); break; case API_VOLTS: sprintf(buf, "%.3f", *((float *)(root->data))); break; case API_MHTOTAL: sprintf(buf, "%.4f", *((double *)(root->data))); break; case API_HS: sprintf(buf, "%.15f", *((double *)(root->data))); break; case API_DIFF: sprintf(buf, "%.8f", *((double *)(root->data))); break; case API_BOOL: sprintf(buf, "%s", *((bool *)(root->data)) ? TRUESTR : FALSESTR); break; case API_TIMEVAL: sprintf(buf, "%"PRIu64".%06lu", (uint64_t)((struct timeval *)(root->data))->tv_sec, (unsigned long)((struct timeval *)(root->data))->tv_usec); break; case API_TEMP: sprintf(buf, "%.2f", *((float *)(root->data))); break; case API_JSON: escape = json_dumps((json_t *)(root->data), JSON_COMPACT); strcpy(buf, escape); free(escape); break; case API_PERCENT: sprintf(buf, "%.4f", *((double *)(root->data)) * 100.0); break; default: applog(LOG_ERR, "API: unknown2 data type %d ignored", root->type); sprintf(buf, "%s%s%s", quote, UNKNOWN, quote); break; } buf = strchr(buf, '\0'); free(root->name); if (root->type == API_JSON) json_decref((json_t *)root->data); if (root->data_was_malloc) free(root->data); if (root->next == root) { free(root); root = NULL; } else { tmp = root; root = tmp->next; root->prev = tmp->prev; root->prev->next = root; free(tmp); } } strcpy(buf, isjson ? 
JSON5 : SEPSTR); return root; } #ifdef HAVE_AN_FPGA static int numpgas() { int count = 0; int i; rd_lock(&devices_lock); for (i = 0; i < total_devices; i++) { #ifdef HAVE_OPENCL if (devices[i]->drv == &opencl_api) continue; #endif #ifdef WANT_CPUMINE if (devices[i]->drv == &cpu_drv) continue; #endif if (devices[i]->device != devices[i] && !per_proc) continue; ++count; } rd_unlock(&devices_lock); return count; } static int pgadevice(int pgaid) { int count = 0; int i; rd_lock(&devices_lock); for (i = 0; i < total_devices; i++) { #ifdef HAVE_OPENCL if (devices[i]->drv == &opencl_api) continue; #endif #ifdef WANT_CPUMINE if (devices[i]->drv == &cpu_drv) continue; #endif if (devices[i]->device != devices[i] && !per_proc) continue; ++count; if (count == (pgaid + 1)) goto foundit; } rd_unlock(&devices_lock); return -1; foundit: rd_unlock(&devices_lock); return i; } #endif // All replies (except BYE and RESTART) start with a message // thus for JSON, message() inserts JSON_START at the front // and send_result() adds JSON_END at the end static void message(struct io_data *io_data, int messageid, int paramid, char *param2, bool isjson) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; char buf2[TMPBUFSIZ]; char severity[2]; #ifdef HAVE_AN_FPGA int pga; #endif #ifdef WANT_CPUMINE int cpu; #endif int i; io_reinit(io_data); if (isjson) io_put(io_data, JSON_START JSON_STATUS); for (i = 0; codes[i].severity != SEVERITY_FAIL; i++) { if (codes[i].code == messageid) { switch (codes[i].severity) { case SEVERITY_WARN: severity[0] = 'W'; break; case SEVERITY_INFO: severity[0] = 'I'; break; case SEVERITY_SUCC: severity[0] = 'S'; break; case SEVERITY_ERR: default: severity[0] = 'E'; break; } severity[1] = '\0'; switch(codes[i].params) { case PARAM_COUNT: case PARAM_GPU: case PARAM_PGA: case PARAM_CPU: case PARAM_PID: sprintf(buf, codes[i].description, paramid); break; case PARAM_POOL: sprintf(buf, codes[i].description, paramid, pools[paramid]->rpc_url); break; #ifdef HAVE_OPENCL case PARAM_GPUMAX: sprintf(buf, codes[i].description, paramid, nDevs - 1); break; #endif #ifdef HAVE_AN_FPGA case PARAM_PGAMAX: pga = numpgas(); sprintf(buf, codes[i].description, paramid, pga - 1); break; #endif #ifdef WANT_CPUMINE case PARAM_CPUMAX: if (opt_n_threads > 0) cpu = num_processors; else cpu = 0; sprintf(buf, codes[i].description, paramid, cpu - 1); break; #endif case PARAM_PMAX: sprintf(buf, codes[i].description, total_pools); break; case PARAM_POOLMAX: sprintf(buf, codes[i].description, paramid, total_pools - 1); break; case PARAM_DMAX: #ifdef HAVE_AN_FPGA pga = numpgas(); #endif #ifdef WANT_CPUMINE if (opt_n_threads > 0) cpu = num_processors; else cpu = 0; #endif sprintf(buf, codes[i].description #ifdef HAVE_OPENCL , nDevs #endif #ifdef HAVE_AN_FPGA , pga #endif #ifdef WANT_CPUMINE , cpu #endif ); break; case PARAM_CMD: sprintf(buf, codes[i].description, JSON_COMMAND); break; case PARAM_STR: sprintf(buf, codes[i].description, param2); break; case PARAM_BOTH: sprintf(buf, codes[i].description, paramid, param2); break; case PARAM_BOOL: sprintf(buf, codes[i].description, paramid ? 
TRUESTR : FALSESTR); break; case PARAM_SET: sprintf(buf, codes[i].description, param2, paramid); break; case PARAM_NONE: default: strcpy(buf, codes[i].description); } root = api_add_string(root, _STATUS, severity, false); root = api_add_time(root, "When", &when, false); root = api_add_int(root, "Code", &messageid, false); root = api_add_escape(root, "Msg", buf, false); root = api_add_escape(root, "Description", opt_api_description, false); root = print_data(root, buf2, isjson, false); io_add(io_data, buf2); if (isjson) io_add(io_data, JSON_CLOSE); return; } } root = api_add_string(root, _STATUS, "F", false); root = api_add_time(root, "When", &when, false); int id = -1; root = api_add_int(root, "Code", &id, false); sprintf(buf, "%d", messageid); root = api_add_escape(root, "Msg", buf, false); root = api_add_escape(root, "Description", opt_api_description, false); root = print_data(root, buf2, isjson, false); io_add(io_data, buf2); if (isjson) io_add(io_data, JSON_CLOSE); } static void apiversion(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; message(io_data, MSG_VERSION, 0, NULL, isjson); io_open = io_add(io_data, isjson ? COMSTR JSON_VERSION : _VERSION COMSTR); root = api_add_string(root, "CGMiner", VERSION, false); root = api_add_const(root, "API", APIVERSION, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static void minerconfig(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; struct driver_registration *reg, *regtmp; int gpucount = 0; int pgacount = 0; int cpucount = 0; char *adlinuse = (char *)NO; #ifdef HAVE_ADL const char *adl = YES; int i; for (i = 0; i < nDevs; i++) { if (gpus[i].has_adl) { adlinuse = (char *)YES; break; } } #else const char *adl = NO; #endif #ifdef HAVE_OPENCL gpucount = nDevs; #endif #ifdef HAVE_AN_FPGA pgacount = numpgas(); #endif #ifdef WANT_CPUMINE cpucount = opt_n_threads > 0 ? num_processors : 0; #endif message(io_data, MSG_MINECONFIG, 0, NULL, isjson); io_open = io_add(io_data, isjson ? 
COMSTR JSON_MINECONFIG : _MINECONFIG COMSTR); root = api_add_int(root, "GPU Count", &gpucount, false); root = api_add_int(root, "PGA Count", &pgacount, false); root = api_add_int(root, "CPU Count", &cpucount, false); root = api_add_int(root, "Pool Count", &total_pools, false); root = api_add_const(root, "ADL", (char *)adl, false); root = api_add_string(root, "ADL in use", adlinuse, false); root = api_add_const(root, "Strategy", strategies[pool_strategy].s, false); root = api_add_int(root, "Log Interval", &opt_log_interval, false); strcpy(buf, "" #ifdef USE_LIBMICROHTTPD " SGW" #endif #ifdef USE_LIBEVENT " SSM" #endif ); BFG_FOREACH_DRIVER_BY_DNAME(reg, regtmp) { const struct device_drv * const drv = reg->drv; tailsprintf(buf, sizeof(buf), " %s", drv->name); } root = api_add_const(root, "Device Code", &buf[1], true); root = api_add_const(root, "OS", OSINFO, false); root = api_add_bool(root, "Failover-Only", &opt_fail_only, false); root = api_add_int(root, "ScanTime", &opt_scantime, false); root = api_add_int(root, "Queue", &opt_queue, false); root = api_add_int(root, "Expiry", &opt_expiry, false); #if BLKMAKER_VERSION > 0 root = api_add_string(root, "Coinbase-Sig", opt_coinbase_sig, true); #endif root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static const char* bool2str(bool b) { return b ? YES : NO; } static const char *status2str(enum alive status) { switch (status) { case LIFE_WELL: return ALIVE; case LIFE_SICK: return SICK; case LIFE_DEAD: case LIFE_DEAD2: return DEAD; case LIFE_NOSTART: return NOSTART; case LIFE_INIT: case LIFE_INIT2: return INIT; case LIFE_WAIT: return WAIT; case LIFE_MIXED: return "Mixed"; default: return UNKNOWN; } } static struct api_data *api_add_device_identifier(struct api_data *root, struct cgpu_info *cgpu) { root = api_add_string(root, "Name", cgpu->drv->name, false); root = api_add_int(root, "ID", &(cgpu->device_id), false); if (per_proc) root = api_add_int(root, "ProcID", &(cgpu->proc_id), false); return root; } static int find_index_by_cgpu(struct cgpu_info *cgpu) { int n = 0, i; rd_lock(&devices_lock); for (i = 0; i < total_devices; ++i) { if (devices[i] == cgpu) break; if (devices[i]->device != devices[i] && !per_proc) continue; if (cgpu->devtype == devices[i]->devtype) ++n; } rd_unlock(&devices_lock); return n; } static void devdetail_an(struct io_data *io_data, struct cgpu_info *cgpu, bool isjson, bool precom) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; int n; cgpu_utility(cgpu); n = find_index_by_cgpu(cgpu); root = api_add_int(root, "DEVDETAILS", &n, true); root = api_add_device_identifier(root, cgpu); if (!per_proc) root = api_add_int(root, "Processors", &cgpu->procs, false); root = api_add_string(root, "Driver", cgpu->drv->dname, false); if (cgpu->kname) root = api_add_string(root, "Kernel", cgpu->kname, false); if (cgpu->name) root = api_add_string(root, "Model", cgpu->name, false); if (cgpu->dev_manufacturer) root = api_add_string(root, "Manufacturer", cgpu->dev_manufacturer, false); if (cgpu->dev_product) root = api_add_string(root, "Product", cgpu->dev_product, false); if (cgpu->dev_serial) root = api_add_string(root, "Serial", cgpu->dev_serial, false); if (cgpu->device_path) root = api_add_string(root, "Device Path", cgpu->device_path, false); root = api_add_int(root, "Target Temperature", &cgpu->targettemp, false); root = api_add_int(root, "Cutoff Temperature", &cgpu->cutofftemp, false); if (cgpu->drv->get_api_extra_device_detail) root = api_add_extra(root, 
cgpu->drv->get_api_extra_device_detail(cgpu)); root = print_data(root, buf, isjson, precom); io_add(io_data, buf); } static void devstatus_an(struct io_data *io_data, struct cgpu_info *cgpu, bool isjson, bool precom) { struct cgpu_info *proc; struct api_data *root = NULL; char buf[TMPBUFSIZ]; int n; n = find_index_by_cgpu(cgpu); double runtime = cgpu_runtime(cgpu); bool enabled = false; double total_mhashes = 0, rolling = 0, utility = 0; enum alive status = cgpu->status; float temp = -1; int accepted = 0, rejected = 0, stale = 0, hw_errors = 0; int diff1 = 0, bad_nonces = 0; double diff_accepted = 0, diff_rejected = 0, diff_stale = 0; int last_share_pool = -1; time_t last_share_pool_time = -1, last_device_valid_work = -1; double last_share_diff = -1; int procs = per_proc ? 1 : cgpu->procs, i; for (i = 0, proc = cgpu; i < procs; ++i, proc = proc->next_proc) { cgpu_utility(proc); if (proc->deven != DEV_DISABLED) enabled = true; total_mhashes += proc->total_mhashes; rolling += proc->rolling; utility += proc->utility; accepted += proc->accepted; rejected += proc->rejected; stale += proc->stale; hw_errors += proc->hw_errors; diff1 += proc->diff1; diff_accepted += proc->diff_accepted; diff_rejected += proc->diff_rejected; diff_stale += proc->diff_stale; bad_nonces += proc->bad_nonces; if (status != proc->status) status = LIFE_MIXED; if (proc->temp > temp) temp = proc->temp; if (proc->last_share_pool_time > last_share_pool_time) { last_share_pool_time = proc->last_share_pool_time; last_share_pool = proc->last_share_pool; last_share_diff = proc->last_share_diff; } if (proc->last_device_valid_work > last_device_valid_work) last_device_valid_work = proc->last_device_valid_work; if (per_proc) break; } root = api_add_int(root, (char*)cgpu->devtype, &n, true); root = api_add_device_identifier(root, cgpu); root = api_add_string(root, "Enabled", bool2str(enabled), false); root = api_add_string(root, "Status", status2str(status), false); if (temp > 0) root = api_add_temp(root, "Temperature", &temp, false); root = api_add_elapsed(root, "Device Elapsed", &runtime, false); double mhs = total_mhashes / runtime; root = api_add_mhs(root, "MHS av", &mhs, false); char mhsname[27]; sprintf(mhsname, "MHS %ds", opt_log_interval); root = api_add_mhs(root, mhsname, &rolling, false); root = api_add_int(root, "Accepted", &accepted, false); root = api_add_int(root, "Rejected", &rejected, false); root = api_add_int(root, "Hardware Errors", &hw_errors, false); root = api_add_utility(root, "Utility", &utility, false); root = api_add_int(root, "Stale", &stale, false); if (last_share_pool != -1) { root = api_add_int(root, "Last Share Pool", &last_share_pool, false); root = api_add_time(root, "Last Share Time", &last_share_pool_time, false); } root = api_add_mhtotal(root, "Total MH", &total_mhashes, false); double work_utility = diff1 / runtime * 60; root = api_add_int(root, "Diff1 Work", &diff1, false); root = api_add_utility(root, "Work Utility", &work_utility, false); root = api_add_diff(root, "Difficulty Accepted", &diff_accepted, false); root = api_add_diff(root, "Difficulty Rejected", &diff_rejected, false); root = api_add_diff(root, "Difficulty Stale", &diff_stale, false); if (last_share_diff > 0) root = api_add_diff(root, "Last Share Difficulty", &last_share_diff, false); if (last_device_valid_work != -1) root = api_add_time(root, "Last Valid Work", &last_device_valid_work, false); double hwp = (bad_nonces + diff1) ? 
(double)(bad_nonces) / (double)(bad_nonces + diff1) : 0; root = api_add_percent(root, "Device Hardware%", &hwp, false); double rejp = diff1 ? (double)(diff_rejected) / (double)(diff1) : 0; root = api_add_percent(root, "Device Rejected%", &rejp, false); if ((per_proc || cgpu->procs <= 1) && cgpu->drv->get_api_extra_device_status) root = api_add_extra(root, cgpu->drv->get_api_extra_device_status(cgpu)); root = print_data(root, buf, isjson, precom); io_add(io_data, buf); } #ifdef HAVE_OPENCL static void gpustatus(struct io_data *io_data, int gpu, bool isjson, bool precom) { if (gpu < 0 || gpu >= nDevs) return; devstatus_an(io_data, &gpus[gpu], isjson, precom); } #endif #ifdef HAVE_AN_FPGA static void pgastatus(struct io_data *io_data, int pga, bool isjson, bool precom) { int dev = pgadevice(pga); if (dev < 0) // Should never happen return; devstatus_an(io_data, get_devices(dev), isjson, precom); } #endif #ifdef WANT_CPUMINE static void cpustatus(struct io_data *io_data, int cpu, bool isjson, bool precom) { if (opt_n_threads <= 0 || cpu < 0 || cpu >= num_processors) return; devstatus_an(io_data, &cpus[cpu], isjson, precom); } #endif static void devinfo_internal(void (*func)(struct io_data *, struct cgpu_info*, bool, bool), int msg, struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu; bool io_open = false; int i; if (total_devices == 0) { message(io_data, MSG_NODEVS, 0, NULL, isjson); return; } message(io_data, msg, 0, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_DEVS); for (i = 0; i < total_devices; ++i) { cgpu = get_devices(i); if (per_proc || cgpu->device == cgpu) func(io_data, cgpu, isjson, isjson && i > 0); } if (isjson && io_open) io_close(io_data); } static void devdetail(struct io_data *io_data, SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { return devinfo_internal(devdetail_an, MSG_DEVDETAILS, io_data, c, param, isjson, group); } static void devstatus(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { return devinfo_internal(devstatus_an, MSG_DEVS, io_data, c, param, isjson, group); } #ifdef HAVE_OPENCL static void gpudev(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { bool io_open = false; int id; if (nDevs == 0) { message(io_data, MSG_GPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= nDevs) { message(io_data, MSG_INVGPU, id, NULL, isjson); return; } message(io_data, MSG_GPUDEV, id, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_GPU); gpustatus(io_data, id, isjson, false); if (isjson && io_open) io_close(io_data); } #endif static void devscan(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { int n; bool io_open = false; applog(LOG_DEBUG, "RPC: request to scan %s for devices", param); if (param && !param[0]) param = NULL; n = scan_serial(param); message(io_data, MSG_DEVSCAN, n, NULL, isjson); io_open = io_add(io_data, isjson ? 
COMSTR JSON_DEVS : _DEVS COMSTR); n = total_devices - n; for (int i = n; i < total_devices; ++i) devdetail_an(io_data, get_devices(i), isjson, i > n); if (isjson && io_open) io_close(io_data); } #ifdef HAVE_AN_FPGA static void pgadev(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { bool io_open = false; int numpga = numpgas(); int id; if (numpga == 0) { message(io_data, MSG_PGANON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= numpga) { message(io_data, MSG_INVPGA, id, NULL, isjson); return; } message(io_data, MSG_PGADEV, id, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_PGA); pgastatus(io_data, id, isjson, false); if (isjson && io_open) io_close(io_data); } static void pgaenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu, *proc; int numpga = numpgas(); int id; bool already; if (numpga == 0) { message(io_data, MSG_PGANON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= numpga) { message(io_data, MSG_INVPGA, id, NULL, isjson); return; } int dev = pgadevice(id); if (dev < 0) { // Should never happen message(io_data, MSG_INVPGA, id, NULL, isjson); return; } cgpu = get_devices(dev); applog(LOG_DEBUG, "API: request to pgaenable %s id %d device %d %s", per_proc ? "proc" : "dev", id, dev, cgpu->proc_repr_ns); already = true; int procs = per_proc ? 1 : cgpu->procs, i; for (i = 0, proc = cgpu; i < procs; ++i, proc = proc->next_proc) { if (proc->deven == DEV_DISABLED) { proc_enable(proc); already = false; } } if (already) { message(io_data, MSG_PGALRENA, id, NULL, isjson); return; } #if 0 /* A DISABLED device wont change status FIXME: should disabling make it WELL? */ if (cgpu->status != LIFE_WELL) { message(io_data, MSG_PGAUNW, id, NULL, isjson); return; } #endif message(io_data, MSG_PGAENA, id, NULL, isjson); } static void pgadisable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu, *proc; int numpga = numpgas(); int id; bool already; if (numpga == 0) { message(io_data, MSG_PGANON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= numpga) { message(io_data, MSG_INVPGA, id, NULL, isjson); return; } int dev = pgadevice(id); if (dev < 0) { // Should never happen message(io_data, MSG_INVPGA, id, NULL, isjson); return; } cgpu = get_devices(dev); applog(LOG_DEBUG, "API: request to pgadisable %s id %d device %d %s", per_proc ? "proc" : "dev", id, dev, cgpu->proc_repr_ns); already = true; int procs = per_proc ? 
1 : cgpu->procs, i; for (i = 0, proc = cgpu; i < procs; ++i, proc = proc->next_proc) { if (proc->deven != DEV_DISABLED) { cgpu->deven = DEV_DISABLED; already = false; } } if (already) { message(io_data, MSG_PGALRDIS, id, NULL, isjson); return; } message(io_data, MSG_PGADIS, id, NULL, isjson); } static void pgaidentify(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu; struct device_drv *drv; int numpga = numpgas(); int id; if (numpga == 0) { message(io_data, MSG_PGANON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= numpga) { message(io_data, MSG_INVPGA, id, NULL, isjson); return; } int dev = pgadevice(id); if (dev < 0) { // Should never happen message(io_data, MSG_INVPGA, id, NULL, isjson); return; } cgpu = get_devices(dev); drv = cgpu->drv; if (drv->identify_device && drv->identify_device(cgpu)) message(io_data, MSG_PGAIDENT, id, NULL, isjson); else message(io_data, MSG_PGANOID, id, NULL, isjson); } #endif #ifdef WANT_CPUMINE static void cpudev(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { bool io_open = false; int id; if (opt_n_threads == 0) { message(io_data, MSG_CPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= num_processors) { message(io_data, MSG_INVCPU, id, NULL, isjson); return; } message(io_data, MSG_CPUDEV, id, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_CPU); cpustatus(io_data, id, isjson, false); if (isjson && io_open) io_close(io_data); } #endif static void poolstatus(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open = false; char *status, *lp; int i; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } message(io_data, MSG_POOL, 0, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_POOLS); for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (pool->removed) continue; switch (pool->enabled) { case POOL_DISABLED: status = (char *)DISABLED; break; case POOL_REJECTING: status = (char *)REJECTING; break; case POOL_ENABLED: if (pool->idle) status = (char *)DEAD; else status = (char *)ALIVE; break; default: status = (char *)UNKNOWN; break; } if (pool->hdr_path) lp = (char *)YES; else lp = (char *)NO; root = api_add_int(root, "POOL", &i, false); root = api_add_escape(root, "URL", pool->rpc_url, false); root = api_add_string(root, "Status", status, false); root = api_add_int(root, "Priority", &(pool->prio), false); root = api_add_int(root, "Quota", &pool->quota, false); root = api_add_string(root, "Long Poll", lp, false); root = api_add_uint(root, "Getworks", &(pool->getwork_requested), false); root = api_add_int(root, "Accepted", &(pool->accepted), false); root = api_add_int(root, "Rejected", &(pool->rejected), false); root = api_add_int(root, "Works", &pool->works, false); root = api_add_uint(root, "Discarded", &(pool->discarded_work), false); root = api_add_uint(root, "Stale", &(pool->stale_shares), false); root = api_add_uint(root, "Get Failures", &(pool->getfail_occasions), false); root = api_add_uint(root, "Remote Failures", &(pool->remotefail_occasions), false); root = api_add_escape(root, 
"User", pool->rpc_user, false); root = api_add_time(root, "Last Share Time", &(pool->last_share_time), false); root = api_add_int(root, "Diff1 Shares", &(pool->diff1), false); if (pool->rpc_proxy) { root = api_add_escape(root, "Proxy", pool->rpc_proxy, false); } else { root = api_add_const(root, "Proxy", BLANK, false); } root = api_add_diff(root, "Difficulty Accepted", &(pool->diff_accepted), false); root = api_add_diff(root, "Difficulty Rejected", &(pool->diff_rejected), false); root = api_add_diff(root, "Difficulty Stale", &(pool->diff_stale), false); root = api_add_diff(root, "Last Share Difficulty", &(pool->last_share_diff), false); root = api_add_bool(root, "Has Stratum", &(pool->has_stratum), false); root = api_add_bool(root, "Stratum Active", &(pool->stratum_active), false); if (pool->stratum_active) root = api_add_escape(root, "Stratum URL", pool->stratum_url, false); else root = api_add_const(root, "Stratum URL", BLANK, false); root = api_add_uint64(root, "Best Share", &(pool->best_diff), true); if (pool->admin_msg) root = api_add_escape(root, "Message", pool->admin_msg, true); double rejp = (pool->diff_accepted + pool->diff_rejected + pool->diff_stale) ? (double)(pool->diff_rejected) / (double)(pool->diff_accepted + pool->diff_rejected + pool->diff_stale) : 0; root = api_add_percent(root, "Pool Rejected%", &rejp, false); double stalep = (pool->diff_accepted + pool->diff_rejected + pool->diff_stale) ? (double)(pool->diff_stale) / (double)(pool->diff_accepted + pool->diff_rejected + pool->diff_stale) : 0; root = api_add_percent(root, "Pool Stale%", &stalep, false); root = print_data(root, buf, isjson, isjson && (i > 0)); io_add(io_data, buf); } if (isjson && io_open) io_close(io_data); } static void summary(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; double utility, mhs, work_utility; #ifdef WANT_CPUMINE char *algo = (char *)(algo_names[opt_algo]); if (algo == NULL) algo = (char *)NULLSTR; #endif message(io_data, MSG_SUMM, 0, NULL, isjson); io_open = io_add(io_data, isjson ? COMSTR JSON_SUMMARY : _SUMMARY COMSTR); // stop hashmeter() changing some while copying mutex_lock(&hash_lock); utility = total_accepted / ( total_secs ? total_secs : 1 ) * 60; mhs = total_mhashes_done / total_secs; work_utility = total_diff1 / ( total_secs ? 
total_secs : 1 ) * 60; root = api_add_elapsed(root, "Elapsed", &(total_secs), true); #ifdef WANT_CPUMINE if (opt_n_threads) root = api_add_string(root, "Algorithm", algo, false); #endif root = api_add_mhs(root, "MHS av", &(mhs), false); char mhsname[27]; sprintf(mhsname, "MHS %ds", opt_log_interval); root = api_add_mhs(root, mhsname, &(total_rolling), false); root = api_add_uint(root, "Found Blocks", &(found_blocks), true); root = api_add_int(root, "Getworks", &(total_getworks), true); root = api_add_int(root, "Accepted", &(total_accepted), true); root = api_add_int(root, "Rejected", &(total_rejected), true); root = api_add_int(root, "Hardware Errors", &(hw_errors), true); root = api_add_utility(root, "Utility", &(utility), false); root = api_add_int(root, "Discarded", &(total_discarded), true); root = api_add_int(root, "Stale", &(total_stale), true); root = api_add_uint(root, "Get Failures", &(total_go), true); root = api_add_uint(root, "Local Work", &(local_work), true); root = api_add_uint(root, "Remote Failures", &(total_ro), true); root = api_add_uint(root, "Network Blocks", &(new_blocks), true); root = api_add_mhtotal(root, "Total MH", &(total_mhashes_done), true); root = api_add_int(root, "Diff1 Work", &total_diff1, true); root = api_add_utility(root, "Work Utility", &(work_utility), false); root = api_add_diff(root, "Difficulty Accepted", &(total_diff_accepted), true); root = api_add_diff(root, "Difficulty Rejected", &(total_diff_rejected), true); root = api_add_diff(root, "Difficulty Stale", &(total_diff_stale), true); root = api_add_uint64(root, "Best Share", &(best_diff), true); double hwp = (total_bad_nonces + total_diff1) ? (double)(total_bad_nonces) / (double)(total_bad_nonces + total_diff1) : 0; root = api_add_percent(root, "Device Hardware%", &hwp, false); double rejp = total_diff1 ? (double)(total_diff_rejected) / (double)(total_diff1) : 0; root = api_add_percent(root, "Device Rejected%", &rejp, false); double prejp = (total_diff_accepted + total_diff_rejected + total_diff_stale) ? (double)(total_diff_rejected) / (double)(total_diff_accepted + total_diff_rejected + total_diff_stale) : 0; root = api_add_percent(root, "Pool Rejected%", &prejp, false); double stalep = (total_diff_accepted + total_diff_rejected + total_diff_stale) ? 
(double)(total_diff_stale) / (double)(total_diff_accepted + total_diff_rejected + total_diff_stale) : 0; root = api_add_percent(root, "Pool Stale%", &stalep, false); mutex_unlock(&hash_lock); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } #ifdef HAVE_OPENCL static void gpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (!nDevs) { message(io_data, MSG_GPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= nDevs) { message(io_data, MSG_INVGPU, id, NULL, isjson); return; } applog(LOG_DEBUG, "API: request to gpuenable gpuid %d %s", id, gpus[id].proc_repr_ns); if (gpus[id].deven != DEV_DISABLED) { message(io_data, MSG_ALRENA, id, NULL, isjson); return; } if (gpus[id].status != LIFE_WELL) { message(io_data, MSG_GPUMRE, id, NULL, isjson); return; } proc_enable(&gpus[id]); message(io_data, MSG_GPUREN, id, NULL, isjson); } static void gpudisable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (nDevs == 0) { message(io_data, MSG_GPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= nDevs) { message(io_data, MSG_INVGPU, id, NULL, isjson); return; } applog(LOG_DEBUG, "API: request to gpudisable gpuid %d %s", id, gpus[id].proc_repr_ns); if (gpus[id].deven == DEV_DISABLED) { message(io_data, MSG_ALRDIS, id, NULL, isjson); return; } gpus[id].deven = DEV_DISABLED; message(io_data, MSG_GPUDIS, id, NULL, isjson); } static void gpurestart(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (nDevs == 0) { message(io_data, MSG_GPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= nDevs) { message(io_data, MSG_INVGPU, id, NULL, isjson); return; } reinit_device(&gpus[id]); message(io_data, MSG_GPUREI, id, NULL, isjson); } #endif static void gpucount(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; int numgpu = 0; #ifdef HAVE_OPENCL numgpu = nDevs; #endif message(io_data, MSG_NUMGPU, 0, NULL, isjson); io_open = io_add(io_data, isjson ? COMSTR JSON_GPUS : _GPUS COMSTR); root = api_add_int(root, "Count", &numgpu, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static void pgacount(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; int count = 0; #ifdef HAVE_AN_FPGA count = numpgas(); #endif message(io_data, MSG_NUMPGA, 0, NULL, isjson); io_open = io_add(io_data, isjson ? 
COMSTR JSON_PGAS : _PGAS COMSTR); root = api_add_int(root, "Count", &count, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } #ifdef WANT_CPUMINE static void cpuenable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (opt_n_threads == 0) { message(io_data, MSG_CPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= opt_n_threads) { message(io_data, MSG_INVCPU, id, NULL, isjson); return; } applog(LOG_DEBUG, "API: request to cpuenable cpuid %d %s", id, cpus[id].proc_repr_ns); if (cpus[id].deven != DEV_DISABLED) { message(io_data, MSG_ALRENAC, id, NULL, isjson); return; } if (cpus[id].status != LIFE_WELL) { message(io_data, MSG_CPUMRE, id, NULL, isjson); return; } proc_enable(&cpus[id]); message(io_data, MSG_CPUREN, id, NULL, isjson); } static void cpudisable(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (opt_n_threads == 0) { message(io_data, MSG_CPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= opt_n_threads) { message(io_data, MSG_INVCPU, id, NULL, isjson); return; } applog(LOG_DEBUG, "API: request to cpudisable cpuid %d %s", id, cpus[id].proc_repr_ns); if (cpus[id].deven == DEV_DISABLED) { message(io_data, MSG_ALRDISC, id, NULL, isjson); return; } cpus[id].deven = DEV_DISABLED; message(io_data, MSG_CPUDIS, id, NULL, isjson); } static void cpurestart(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; if (opt_n_threads == 0) { message(io_data, MSG_CPUNON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= opt_n_threads) { message(io_data, MSG_INVCPU, id, NULL, isjson); return; } reinit_device(&cpus[id]); message(io_data, MSG_CPUREI, id, NULL, isjson); } #endif static void cpucount(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; int count = 0; #ifdef WANT_CPUMINE count = opt_n_threads > 0 ? num_processors : 0; #endif message(io_data, MSG_NUMCPU, 0, NULL, isjson); io_open = io_add(io_data, isjson ? 
COMSTR JSON_CPUS : _CPUS COMSTR); root = api_add_int(root, "Count", &count, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static void switchpool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct pool *pool; int id; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISPID, 0, NULL, isjson); return; } id = atoi(param); cg_rlock(&control_lock); if (id < 0 || id >= total_pools) { cg_runlock(&control_lock); message(io_data, MSG_INVPID, id, NULL, isjson); return; } pool = pools[id]; pool->enabled = POOL_ENABLED; cg_runlock(&control_lock); switch_pools(pool); message(io_data, MSG_SWITCHP, id, NULL, isjson); } static void copyadvanceafter(char ch, char **param, char **buf) { #define src_p (*param) #define dst_b (*buf) while (*src_p && *src_p != ch) { if (*src_p == '\\' && *(src_p+1) != '\0') src_p++; *(dst_b++) = *(src_p++); } if (*src_p) src_p++; *(dst_b++) = '\0'; } static bool pooldetails(char *param, char **url, char **user, char **pass) { char *ptr, *buf; ptr = buf = malloc(strlen(param)+1); if (unlikely(!buf)) quit(1, "Failed to malloc pooldetails buf"); *url = buf; // copy url copyadvanceafter(',', ¶m, &buf); if (!(*param)) // missing user goto exitsama; *user = buf; // copy user copyadvanceafter(',', ¶m, &buf); if (!*param) // missing pass goto exitsama; *pass = buf; // copy pass copyadvanceafter(',', ¶m, &buf); return true; exitsama: free(ptr); return false; } static void addpool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { char *url, *user, *pass; struct pool *pool; char *ptr; if (param == NULL || *param == '\0') { message(io_data, MSG_MISPDP, 0, NULL, isjson); return; } if (!pooldetails(param, &url, &user, &pass)) { ptr = escape_string(param, isjson); message(io_data, MSG_INVPDP, 0, ptr, isjson); if (ptr != param) free(ptr); ptr = NULL; return; } pool = add_pool(); detect_stratum(pool, url); add_pool_details(pool, true, url, user, pass); ptr = escape_string(url, isjson); message(io_data, MSG_ADDPOOL, 0, ptr, isjson); if (ptr != url) free(ptr); ptr = NULL; } static void enablepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct pool *pool; int id; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISPID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= total_pools) { message(io_data, MSG_INVPID, id, NULL, isjson); return; } pool = pools[id]; if (pool->enabled == POOL_ENABLED) { message(io_data, MSG_ALRENAP, id, NULL, isjson); return; } pool->enabled = POOL_ENABLED; if (pool->prio < current_pool()->prio) switch_pools(pool); message(io_data, MSG_ENAPOOL, id, NULL, isjson); } static void poolpriority(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int i; switch (prioritize_pools(param, &i)) { case MSG_NOPOOL: message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; case MSG_MISPID: message(io_data, MSG_MISPID, 0, NULL, isjson); return; case MSG_INVPID: message(io_data, MSG_INVPID, i, NULL, isjson); return; case MSG_DUPPID: message(io_data, MSG_DUPPID, i, NULL, isjson); return; case MSG_POOLPRIO: default: message(io_data, MSG_POOLPRIO, 0, NULL, isjson); return; } } static 
void poolquota(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct pool *pool; int quota, id; char *comma; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISPID, 0, NULL, isjson); return; } comma = strchr(param, ','); if (!comma) { message(io_data, MSG_CONVAL, 0, param, isjson); return; } *(comma++) = '\0'; id = atoi(param); if (id < 0 || id >= total_pools) { message(io_data, MSG_INVPID, id, NULL, isjson); return; } pool = pools[id]; quota = atoi(comma); if (quota < 0) { message(io_data, MSG_INVNEG, quota, pool->rpc_url, isjson); return; } pool->quota = quota; adjust_quota_gcd(); message(io_data, MSG_SETQUOTA, quota, pool->rpc_url, isjson); } static void disablepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct pool *pool; int id; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISPID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= total_pools) { message(io_data, MSG_INVPID, id, NULL, isjson); return; } pool = pools[id]; if (pool->enabled == POOL_DISABLED) { message(io_data, MSG_ALRDISP, id, NULL, isjson); return; } if (enabled_pools <= 1) { message(io_data, MSG_DISLASTP, id, NULL, isjson); return; } pool->enabled = POOL_DISABLED; if (pool == current_pool()) switch_pools(NULL); message(io_data, MSG_DISPOOL, id, NULL, isjson); } static void removepool(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct pool *pool; char *rpc_url; bool dofree = false; int id; if (total_pools == 0) { message(io_data, MSG_NOPOOL, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISPID, 0, NULL, isjson); return; } id = atoi(param); if (id < 0 || id >= total_pools) { message(io_data, MSG_INVPID, id, NULL, isjson); return; } if (total_pools <= 1) { message(io_data, MSG_REMLASTP, id, NULL, isjson); return; } pool = pools[id]; if (pool == current_pool()) switch_pools(NULL); if (pool == current_pool()) { message(io_data, MSG_ACTPOOL, id, NULL, isjson); return; } pool->enabled = POOL_DISABLED; rpc_url = escape_string(pool->rpc_url, isjson); if (rpc_url != pool->rpc_url) dofree = true; remove_pool(pool); message(io_data, MSG_REMPOOL, id, rpc_url, isjson); if (dofree) free(rpc_url); rpc_url = NULL; } #ifdef HAVE_OPENCL static bool splitgpuvalue(struct io_data *io_data, char *param, int *gpu, char **value, bool isjson) { int id; char *gpusep; if (nDevs == 0) { message(io_data, MSG_GPUNON, 0, NULL, isjson); return false; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return false; } gpusep = strchr(param, GPUSEP); if (gpusep == NULL) { message(io_data, MSG_MISVAL, 0, NULL, isjson); return false; } *(gpusep++) = '\0'; id = atoi(param); if (id < 0 || id >= nDevs) { message(io_data, MSG_INVGPU, id, NULL, isjson); return false; } *gpu = id; *value = gpusep; return true; } static void gpuintensity(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { int id; char *value; int intensity; char intensitystr[7]; if (!splitgpuvalue(io_data, param, &id, &value, isjson)) return; if (!strncasecmp(value, DYNAMIC, 1)) { gpus[id].dynamic = true; strcpy(intensitystr, DYNAMIC); } else { intensity = atoi(value); 
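		/* Out-of-range numeric intensities are rejected with MSG_INVINT
		 * before any per-GPU settings are changed. */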
if (intensity < MIN_INTENSITY || intensity > MAX_INTENSITY) { message(io_data, MSG_INVINT, 0, value, isjson); return; } gpus[id].dynamic = false; gpus[id].intensity = intensity; sprintf(intensitystr, "%d", intensity); } message(io_data, MSG_GPUINT, id, intensitystr, isjson); } static void gpumem(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { #ifdef HAVE_ADL int id; char *value; int clock; if (!splitgpuvalue(io_data, param, &id, &value, isjson)) return; clock = atoi(value); if (set_memoryclock(id, clock)) message(io_data, MSG_GPUMERR, id, value, isjson); else message(io_data, MSG_GPUMEM, id, value, isjson); #else message(io_data, MSG_NOADL, 0, NULL, isjson); #endif } static void gpuengine(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { #ifdef HAVE_ADL int id; char *value; int clock; if (!splitgpuvalue(io_data, param, &id, &value, isjson)) return; clock = atoi(value); if (set_engineclock(id, clock)) message(io_data, MSG_GPUEERR, id, value, isjson); else message(io_data, MSG_GPUENG, id, value, isjson); #else message(io_data, MSG_NOADL, 0, NULL, isjson); #endif } static void gpufan(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { #ifdef HAVE_ADL int id; char *value; int fan; if (!splitgpuvalue(io_data, param, &id, &value, isjson)) return; fan = atoi(value); if (set_fanspeed(id, fan)) message(io_data, MSG_GPUFERR, id, value, isjson); else message(io_data, MSG_GPUFAN, id, value, isjson); #else message(io_data, MSG_NOADL, 0, NULL, isjson); #endif } static void gpuvddc(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { #ifdef HAVE_ADL int id; char *value; float vddc; if (!splitgpuvalue(io_data, param, &id, &value, isjson)) return; vddc = atof(value); if (set_vddc(id, vddc)) message(io_data, MSG_GPUVERR, id, value, isjson); else message(io_data, MSG_GPUVDDC, id, value, isjson); #else message(io_data, MSG_NOADL, 0, NULL, isjson); #endif } #endif void doquit(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { if (isjson) io_put(io_data, JSON_START JSON_BYE); else io_put(io_data, _BYE); bye = true; do_a_quit = true; } void dorestart(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { if (isjson) io_put(io_data, JSON_START JSON_RESTART); else io_put(io_data, _RESTART); bye = true; do_a_restart = true; } void privileged(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { message(io_data, MSG_ACCOK, 0, NULL, isjson); } void notifystatus(struct io_data *io_data, int device, struct cgpu_info *cgpu, bool isjson, __maybe_unused char group) { struct cgpu_info *proc; struct api_data *root = NULL; char buf[TMPBUFSIZ]; char *reason; time_t last_not_well = 0; enum dev_reason uninitialised_var(enum_reason); int thread_fail_init_count = 0, thread_zero_hash_count = 0, thread_fail_queue_count = 0; int dev_sick_idle_60_count = 0, dev_dead_idle_600_count = 0; int dev_nostart_count = 0, dev_over_heat_count = 0, dev_thermal_cutoff_count = 0, dev_comms_error_count = 0, dev_throttle_count = 0; int procs = per_proc ? 
1 : cgpu->procs, i; for (i = 0, proc = cgpu; i < procs; ++i, proc = proc->next_proc) { if (proc->device_last_not_well > last_not_well) { last_not_well = proc->device_last_not_well; enum_reason = proc->device_not_well_reason; thread_fail_init_count += proc->thread_fail_init_count; thread_zero_hash_count += proc->thread_zero_hash_count; thread_fail_queue_count += proc->thread_fail_queue_count; dev_sick_idle_60_count += proc->dev_sick_idle_60_count; dev_dead_idle_600_count += proc->dev_dead_idle_600_count; dev_nostart_count += proc->dev_nostart_count; dev_over_heat_count += proc->dev_over_heat_count; dev_thermal_cutoff_count += proc->dev_thermal_cutoff_count; dev_comms_error_count += proc->dev_comms_error_count; dev_throttle_count += proc->dev_throttle_count; } if (per_proc) break; } if (last_not_well == 0) reason = REASON_NONE; else switch (enum_reason) { case REASON_THREAD_FAIL_INIT: reason = REASON_THREAD_FAIL_INIT_STR; break; case REASON_THREAD_ZERO_HASH: reason = REASON_THREAD_ZERO_HASH_STR; break; case REASON_THREAD_FAIL_QUEUE: reason = REASON_THREAD_FAIL_QUEUE_STR; break; case REASON_DEV_SICK_IDLE_60: reason = REASON_DEV_SICK_IDLE_60_STR; break; case REASON_DEV_DEAD_IDLE_600: reason = REASON_DEV_DEAD_IDLE_600_STR; break; case REASON_DEV_NOSTART: reason = REASON_DEV_NOSTART_STR; break; case REASON_DEV_OVER_HEAT: reason = REASON_DEV_OVER_HEAT_STR; break; case REASON_DEV_THERMAL_CUTOFF: reason = REASON_DEV_THERMAL_CUTOFF_STR; break; case REASON_DEV_COMMS_ERROR: reason = REASON_DEV_COMMS_ERROR_STR; break; default: reason = REASON_UNKNOWN_STR; break; } // ALL counters (and only counters) must start the name with a '*' // Simplifies future external support for identifying new counters root = api_add_int(root, "NOTIFY", &device, false); root = api_add_device_identifier(root, cgpu); if (per_proc) root = api_add_time(root, "Last Well", &(cgpu->device_last_well), false); root = api_add_time(root, "Last Not Well", &last_not_well, false); root = api_add_string(root, "Reason Not Well", reason, false); root = api_add_int(root, "*Thread Fail Init", &thread_fail_init_count, false); root = api_add_int(root, "*Thread Zero Hash", &thread_zero_hash_count, false); root = api_add_int(root, "*Thread Fail Queue", &thread_fail_queue_count, false); root = api_add_int(root, "*Dev Sick Idle 60s", &dev_sick_idle_60_count, false); root = api_add_int(root, "*Dev Dead Idle 600s", &dev_dead_idle_600_count, false); root = api_add_int(root, "*Dev Nostart", &dev_nostart_count, false); root = api_add_int(root, "*Dev Over Heat", &dev_over_heat_count, false); root = api_add_int(root, "*Dev Thermal Cutoff", &dev_thermal_cutoff_count, false); root = api_add_int(root, "*Dev Comms Error", &dev_comms_error_count, false); root = api_add_int(root, "*Dev Throttle", &dev_throttle_count, false); root = print_data(root, buf, isjson, isjson && (device > 0)); io_add(io_data, buf); } static void notify(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, char group) { struct cgpu_info *cgpu; bool io_open = false; int i, n = 0; if (total_devices == 0) { message(io_data, MSG_NODEVS, 0, NULL, isjson); return; } message(io_data, MSG_NOTIFY, 0, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_NOTIFY); for (i = 0; i < total_devices; i++) { cgpu = get_devices(i); if (cgpu->device == cgpu || per_proc) notifystatus(io_data, n++, cgpu, isjson, group); } if (isjson && io_open) io_close(io_data); } void dosave(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, 
__maybe_unused char group) { char filename[PATH_MAX]; FILE *fcfg; char *ptr; if (param == NULL || *param == '\0') { default_save_file(filename); param = filename; } fcfg = fopen(param, "w"); if (!fcfg) { ptr = escape_string(param, isjson); message(io_data, MSG_BADFN, 0, ptr, isjson); if (ptr != param) free(ptr); ptr = NULL; return; } write_config(fcfg); fclose(fcfg); ptr = escape_string(param, isjson); message(io_data, MSG_SAVED, 0, ptr, isjson); if (ptr != param) free(ptr); ptr = NULL; } static int itemstats(struct io_data *io_data, int i, char *id, struct cgminer_stats *stats, struct cgminer_pool_stats *pool_stats, struct api_data *extra, bool isjson) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; double elapsed; root = api_add_int(root, "STATS", &i, false); root = api_add_string(root, "ID", id, false); elapsed = stats_elapsed(stats); root = api_add_elapsed(root, "Elapsed", &elapsed, false); root = api_add_uint32(root, "Calls", &(stats->getwork_calls), false); root = api_add_timeval(root, "Wait", &(stats->getwork_wait), false); root = api_add_timeval(root, "Max", &(stats->getwork_wait_max), false); root = api_add_timeval(root, "Min", &(stats->getwork_wait_min), false); if (pool_stats) { root = api_add_uint32(root, "Pool Calls", &(pool_stats->getwork_calls), false); root = api_add_uint32(root, "Pool Attempts", &(pool_stats->getwork_attempts), false); root = api_add_timeval(root, "Pool Wait", &(pool_stats->getwork_wait), false); root = api_add_timeval(root, "Pool Max", &(pool_stats->getwork_wait_max), false); root = api_add_timeval(root, "Pool Min", &(pool_stats->getwork_wait_min), false); root = api_add_double(root, "Pool Av", &(pool_stats->getwork_wait_rolling), false); root = api_add_bool(root, "Work Had Roll Time", &(pool_stats->hadrolltime), false); root = api_add_bool(root, "Work Can Roll", &(pool_stats->canroll), false); root = api_add_bool(root, "Work Had Expire", &(pool_stats->hadexpire), false); root = api_add_uint32(root, "Work Roll Time", &(pool_stats->rolltime), false); root = api_add_diff(root, "Work Diff", &(pool_stats->last_diff), false); root = api_add_diff(root, "Min Diff", &(pool_stats->min_diff), false); root = api_add_diff(root, "Max Diff", &(pool_stats->max_diff), false); root = api_add_uint32(root, "Min Diff Count", &(pool_stats->min_diff_count), false); root = api_add_uint32(root, "Max Diff Count", &(pool_stats->max_diff_count), false); root = api_add_uint64(root, "Times Sent", &(pool_stats->times_sent), false); root = api_add_uint64(root, "Bytes Sent", &(pool_stats->bytes_sent), false); root = api_add_uint64(root, "Times Recv", &(pool_stats->times_received), false); root = api_add_uint64(root, "Bytes Recv", &(pool_stats->bytes_received), false); root = api_add_uint64(root, "Net Bytes Sent", &(pool_stats->net_bytes_sent), false); root = api_add_uint64(root, "Net Bytes Recv", &(pool_stats->net_bytes_received), false); } if (extra) root = api_add_extra(root, extra); root = print_data(root, buf, isjson, isjson && (i > 0)); io_add(io_data, buf); return ++i; } static void minerstats(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu; bool io_open = false; struct api_data *extra; char id[20]; int i, j; message(io_data, MSG_MINESTATS, 0, NULL, isjson); if (isjson) io_open = io_add(io_data, COMSTR JSON_MINESTATS); i = 0; for (j = 0; j < total_devices; j++) { cgpu = get_devices(j); if (cgpu && cgpu->drv) { if (cgpu->drv->get_api_stats) extra = cgpu->drv->get_api_stats(cgpu); 
else extra = NULL; i = itemstats(io_data, i, cgpu->proc_repr_ns, &(cgpu->cgminer_stats), NULL, extra, isjson); } } for (j = 0; j < total_pools; j++) { struct pool *pool = pools[j]; sprintf(id, "POOL%d", j); i = itemstats(io_data, i, id, &(pool->cgminer_stats), &(pool->cgminer_pool_stats), NULL, isjson); } if (isjson && io_open) io_close(io_data); } static void failoveronly(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { if (param == NULL || *param == '\0') { message(io_data, MSG_MISBOOL, 0, NULL, isjson); return; } *param = tolower(*param); if (*param != 't' && *param != 'f') { message(io_data, MSG_INVBOOL, 0, NULL, isjson); return; } bool tf = (*param == 't'); opt_fail_only = tf; message(io_data, MSG_FOO, tf, NULL, isjson); } static void minecoin(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; message(io_data, MSG_MINECOIN, 0, NULL, isjson); io_open = io_add(io_data, isjson ? COMSTR JSON_MINECOIN : _MINECOIN COMSTR); #ifdef USE_SCRYPT if (opt_scrypt) root = api_add_const(root, "Hash Method", SCRYPTSTR, false); else #endif root = api_add_const(root, "Hash Method", SHA256STR, false); cg_rlock(&ch_lock); if (current_fullhash && *current_fullhash) { root = api_add_time(root, "Current Block Time", &block_time, true); root = api_add_string(root, "Current Block Hash", current_fullhash, true); } else { time_t t = 0; root = api_add_time(root, "Current Block Time", &t, true); root = api_add_const(root, "Current Block Hash", BLANK, false); } cg_runlock(&ch_lock); root = api_add_bool(root, "LP", &have_longpoll, false); root = api_add_diff(root, "Network Difficulty", &current_diff, true); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static void debugstate(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; if (param == NULL) param = (char *)BLANK; else *param = tolower(*param); switch(*param) { case 's': opt_realquiet = true; break; case 'q': opt_quiet ^= true; break; case 'v': opt_log_output ^= true; if (opt_log_output) opt_quiet = false; break; case 'd': opt_debug ^= true; opt_log_output = opt_debug; if (opt_debug) opt_quiet = false; break; case 'r': opt_protocol ^= true; if (opt_protocol) opt_quiet = false; break; case 'p': want_per_device_stats ^= true; opt_log_output = want_per_device_stats; break; case 'n': opt_log_output = false; opt_debug = false; opt_quiet = false; opt_protocol = false; want_per_device_stats = false; opt_worktime = false; break; case 'w': opt_worktime ^= true; break; #ifdef _MEMORY_DEBUG case 'y': cgmemspeedup(); break; case 'z': cgmemrpt(); break; #endif default: // anything else just reports the settings break; } message(io_data, MSG_DEBUGSET, 0, NULL, isjson); io_open = io_add(io_data, isjson ? 
COMSTR JSON_DEBUGSET : _DEBUGSET COMSTR); root = api_add_bool(root, "Silent", &opt_realquiet, false); root = api_add_bool(root, "Quiet", &opt_quiet, false); root = api_add_bool(root, "Verbose", &opt_log_output, false); root = api_add_bool(root, "Debug", &opt_debug, false); root = api_add_bool(root, "RPCProto", &opt_protocol, false); root = api_add_bool(root, "PerDevice", &want_per_device_stats, false); root = api_add_bool(root, "WorkTime", &opt_worktime, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } extern void stratumsrv_change_port(); static void setconfig(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { char *comma; int value; if (param == NULL || *param == '\0') { message(io_data, MSG_CONPAR, 0, NULL, isjson); return; } comma = strchr(param, ','); if (!comma) { message(io_data, MSG_CONVAL, 0, param, isjson); return; } *(comma++) = '\0'; #if BLKMAKER_VERSION > 0 if (strcasecmp(param, "coinbase-sig") == 0) { free(opt_coinbase_sig); opt_coinbase_sig = strdup(comma); message(io_data, MSG_SETCONFIG, 1, param, isjson); return; } #endif value = atoi(comma); if (value < 0 || value > 9999) { message(io_data, MSG_INVNUM, value, param, isjson); return; } if (strcasecmp(param, "queue") == 0) opt_queue = value; else if (strcasecmp(param, "scantime") == 0) opt_scantime = value; else if (strcasecmp(param, "expiry") == 0) opt_expiry = value; #ifdef USE_LIBMICROHTTPD else if (strcasecmp(param, "http-port") == 0) { httpsrv_stop(); httpsrv_port = value; if (httpsrv_port != -1) httpsrv_start(httpsrv_port); } #endif #ifdef USE_LIBEVENT else if (strcasecmp(param, "stratum-port") == 0) { stratumsrv_port = value; stratumsrv_change_port(); } #endif else { message(io_data, MSG_UNKCON, 0, param, isjson); return; } message(io_data, MSG_SETCONFIG, value, param, isjson); } #ifdef HAVE_AN_FPGA static void pgaset(struct io_data *io_data, __maybe_unused SOCKETTYPE c, __maybe_unused char *param, bool isjson, __maybe_unused char group) { struct cgpu_info *cgpu; struct device_drv *drv; char buf[TMPBUFSIZ]; int numpga = numpgas(); if (numpga == 0) { message(io_data, MSG_PGANON, 0, NULL, isjson); return; } if (param == NULL || *param == '\0') { message(io_data, MSG_MISID, 0, NULL, isjson); return; } char *opt = strchr(param, ','); if (opt) *(opt++) = '\0'; if (!opt || !*opt) { message(io_data, MSG_MISPGAOPT, 0, NULL, isjson); return; } int id = atoi(param); if (id < 0 || id >= numpga) { message(io_data, MSG_INVPGA, id, NULL, isjson); return; } int dev = pgadevice(id); if (dev < 0) { // Should never happen message(io_data, MSG_INVPGA, id, NULL, isjson); return; } cgpu = get_devices(dev); drv = cgpu->drv; char *set = strchr(opt, ','); if (set) *(set++) = '\0'; if (!drv->set_device) message(io_data, MSG_PGANOSET, id, NULL, isjson); else { char *ret = drv->set_device(cgpu, opt, set, buf); if (ret) { if (strcasecmp(opt, "help") == 0) message(io_data, MSG_PGAHELP, id, ret, isjson); else message(io_data, MSG_PGASETERR, id, ret, isjson); } else message(io_data, MSG_PGASETOK, id, NULL, isjson); } } #endif static void dozero(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, __maybe_unused char group) { if (param == NULL || *param == '\0') { message(io_data, MSG_ZERMIS, 0, NULL, isjson); return; } char *sum = strchr(param, ','); if (sum) *(sum++) = '\0'; if (!sum || !*sum) { message(io_data, MSG_MISBOOL, 0, NULL, isjson); return; } bool all = false; bool bs = false; if 
(strcasecmp(param, "all") == 0) all = true; else if (strcasecmp(param, "bestshare") == 0) bs = true; if (all == false && bs == false) { message(io_data, MSG_ZERINV, 0, param, isjson); return; } *sum = tolower(*sum); if (*sum != 't' && *sum != 'f') { message(io_data, MSG_INVBOOL, 0, NULL, isjson); return; } bool dosum = (*sum == 't'); if (dosum) print_summary(); if (all) zero_stats(); if (bs) zero_bestshare(); if (dosum) message(io_data, MSG_ZERSUM, 0, all ? "All" : "BestShare", isjson); else message(io_data, MSG_ZERNOSUM, 0, all ? "All" : "BestShare", isjson); } static void checkcommand(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, char group); struct CMDS { char *name; void (*func)(struct io_data *, SOCKETTYPE, char *, bool, char); bool iswritemode; } cmds[] = { { "version", apiversion, false }, { "config", minerconfig, false }, { "devscan", devscan, true }, { "devs", devstatus, false }, { "procs", devstatus, false }, { "pools", poolstatus, false }, { "summary", summary, false }, #ifdef HAVE_OPENCL { "gpuenable", gpuenable, true }, { "gpudisable", gpudisable, true }, { "gpurestart", gpurestart, true }, { "gpu", gpudev, false }, #endif #ifdef HAVE_AN_FPGA { "pga", pgadev, false }, { "pgaenable", pgaenable, true }, { "pgadisable", pgadisable, true }, { "pgaidentify", pgaidentify, true }, { "proc", pgadev, false }, { "procenable", pgaenable, true }, { "procdisable", pgadisable, true }, { "procidentify", pgaidentify, true }, #endif #ifdef WANT_CPUMINE { "cpuenable", cpuenable, true }, { "cpudisable", cpudisable, true }, { "cpurestart", cpurestart, true }, { "cpu", cpudev, false }, #endif { "gpucount", gpucount, false }, { "pgacount", pgacount, false }, { "proccount", pgacount, false }, { "cpucount", cpucount, false }, { "switchpool", switchpool, true }, { "addpool", addpool, true }, { "poolpriority", poolpriority, true }, { "poolquota", poolquota, true }, { "enablepool", enablepool, true }, { "disablepool", disablepool, true }, { "removepool", removepool, true }, #ifdef HAVE_OPENCL { "gpuintensity", gpuintensity, true }, { "gpumem", gpumem, true }, { "gpuengine", gpuengine, true }, { "gpufan", gpufan, true }, { "gpuvddc", gpuvddc, true }, #endif { "save", dosave, true }, { "quit", doquit, true }, { "privileged", privileged, true }, { "notify", notify, false }, { "procnotify", notify, false }, { "devdetails", devdetail, false }, { "procdetails", devdetail, false }, { "restart", dorestart, true }, { "stats", minerstats, false }, { "check", checkcommand, false }, { "failover-only", failoveronly, true }, { "coin", minecoin, false }, { "debug", debugstate, true }, { "setconfig", setconfig, true }, #ifdef HAVE_AN_FPGA { "pgaset", pgaset, true }, { "procset", pgaset, true }, #endif { "zero", dozero, true }, { NULL, NULL, false } }; static void checkcommand(struct io_data *io_data, __maybe_unused SOCKETTYPE c, char *param, bool isjson, char group) { struct api_data *root = NULL; char buf[TMPBUFSIZ]; bool io_open; char cmdbuf[100]; bool found, access; int i; if (param == NULL || *param == '\0') { message(io_data, MSG_MISCHK, 0, NULL, isjson); return; } found = false; access = false; for (i = 0; cmds[i].name != NULL; i++) { if (strcmp(cmds[i].name, param) == 0) { found = true; sprintf(cmdbuf, "|%s|", param); if (ISPRIVGROUP(group) || strstr(COMMANDS(group), cmdbuf)) access = true; break; } } message(io_data, MSG_CHECK, 0, NULL, isjson); io_open = io_add(io_data, isjson ? COMSTR JSON_CHECK : _CHECK COMSTR); root = api_add_const(root, "Exists", found ? 
YES : NO, false); root = api_add_const(root, "Access", access ? YES : NO, false); root = print_data(root, buf, isjson, false); io_add(io_data, buf); if (isjson && io_open) io_close(io_data); } static void send_result(struct io_data *io_data, SOCKETTYPE c, bool isjson) { if (io_data->close) io_add(io_data, JSON_CLOSE); if (isjson) io_add(io_data, JSON_END); // Null-terminate reply, including sending the \0 on the socket bytes_append(&io_data->data, "", 1); applog(LOG_DEBUG, "API: send reply: (%ld) '%.10s%s'", (long)bytes_len(&io_data->data), bytes_buf(&io_data->data), bytes_len(&io_data->data) > 10 ? "..." : BLANK); io_flush(io_data, true); if (bytes_len(&io_data->data)) applog(LOG_WARNING, "RPC: Timed out with %ld bytes left to send", (long)bytes_len(&io_data->data)); } static void tidyup(__maybe_unused void *arg) { mutex_lock(&quit_restart_lock); SOCKETTYPE *apisock = (SOCKETTYPE *)arg; bye = true; if (*apisock != INVSOCK) { shutdown(*apisock, SHUT_RDWR); CLOSESOCKET(*apisock); *apisock = INVSOCK; free(apisock); } if (ipaccess != NULL) { free(ipaccess); ipaccess = NULL; } io_free(); mutex_unlock(&quit_restart_lock); } /* * Interpret --api-groups G:cmd1:cmd2:cmd3,P:cmd4,*,... */ static void setup_groups() { char *api_groups = opt_api_groups ? opt_api_groups : (char *)BLANK; char *buf, *ptr, *next, *colon; char group; char commands[TMPBUFSIZ]; char cmdbuf[100]; char *cmd; bool addstar, did; int i; buf = malloc(strlen(api_groups) + 1); if (unlikely(!buf)) quit(1, "Failed to malloc ipgroups buf"); strcpy(buf, api_groups); next = buf; // for each group defined while (next && *next) { ptr = next; next = strchr(ptr, ','); if (next) *(next++) = '\0'; // Validate the group if (*(ptr+1) != ':') { colon = strchr(ptr, ':'); if (colon) *colon = '\0'; quit(1, "API invalid group name '%s'", ptr); } group = GROUP(*ptr); if (!VALIDGROUP(group)) quit(1, "API invalid group name '%c'", *ptr); if (group == PRIVGROUP) quit(1, "API group name can't be '%c'", PRIVGROUP); if (group == NOPRIVGROUP) quit(1, "API group name can't be '%c'", NOPRIVGROUP); if (apigroups[GROUPOFFSET(group)].commands != NULL) quit(1, "API duplicate group name '%c'", *ptr); ptr += 2; // Validate the command list (and handle '*') cmd = &(commands[0]); *(cmd++) = SEPARATOR; *cmd = '\0'; addstar = false; while (ptr && *ptr) { colon = strchr(ptr, ':'); if (colon) *(colon++) = '\0'; if (strcmp(ptr, "*") == 0) addstar = true; else { did = false; for (i = 0; cmds[i].name != NULL; i++) { if (strcasecmp(ptr, cmds[i].name) == 0) { did = true; break; } } if (did) { // skip duplicates sprintf(cmdbuf, "|%s|", cmds[i].name); if (strstr(commands, cmdbuf) == NULL) { strcpy(cmd, cmds[i].name); cmd += strlen(cmds[i].name); *(cmd++) = SEPARATOR; *cmd = '\0'; } } else { quit(1, "API unknown command '%s' in group '%c'", ptr, group); } } ptr = colon; } // * = allow all non-iswritemode commands if (addstar) { for (i = 0; cmds[i].name != NULL; i++) { if (cmds[i].iswritemode == false) { // skip duplicates sprintf(cmdbuf, "|%s|", cmds[i].name); if (strstr(commands, cmdbuf) == NULL) { strcpy(cmd, cmds[i].name); cmd += strlen(cmds[i].name); *(cmd++) = SEPARATOR; *cmd = '\0'; } } } } ptr = apigroups[GROUPOFFSET(group)].commands = malloc(strlen(commands) + 1); if (unlikely(!ptr)) quit(1, "Failed to malloc group commands buf"); strcpy(ptr, commands); } // Now define R (NOPRIVGROUP) as all non-iswritemode commands cmd = &(commands[0]); *(cmd++) = SEPARATOR; *cmd = '\0'; for (i = 0; cmds[i].name != NULL; i++) { if (cmds[i].iswritemode == false) { strcpy(cmd, 
cmds[i].name); cmd += strlen(cmds[i].name); *(cmd++) = SEPARATOR; *cmd = '\0'; } } ptr = apigroups[GROUPOFFSET(NOPRIVGROUP)].commands = malloc(strlen(commands) + 1); if (unlikely(!ptr)) quit(1, "Failed to malloc noprivgroup commands buf"); strcpy(ptr, commands); // W (PRIVGROUP) is handled as a special case since it simply means all commands free(buf); return; } /* * Interpret [W:]IP[/Prefix][,[R|W:]IP2[/Prefix2][,...]] --api-allow option * special case of 0/0 allows /0 (means all IP addresses) */ #define ALLIP4 "0/0" /* * N.B. IP4 addresses are by Definition 32bit big endian on all platforms */ static void setup_ipaccess() { char *buf, *ptr, *comma, *slash, *dot; int ipcount, mask, octet, i; char group; buf = malloc(strlen(opt_api_allow) + 1); if (unlikely(!buf)) quit(1, "Failed to malloc ipaccess buf"); strcpy(buf, opt_api_allow); ipcount = 1; ptr = buf; while (*ptr) if (*(ptr++) == ',') ipcount++; // possibly more than needed, but never less ipaccess = calloc(ipcount, sizeof(struct IP4ACCESS)); if (unlikely(!ipaccess)) quit(1, "Failed to calloc ipaccess"); ips = 0; ptr = buf; while (ptr && *ptr) { while (*ptr == ' ' || *ptr == '\t') ptr++; if (*ptr == ',') { ptr++; continue; } comma = strchr(ptr, ','); if (comma) *(comma++) = '\0'; group = NOPRIVGROUP; if (VALIDGROUP(*ptr) && *(ptr+1) == ':') { if (DEFINEDGROUP(*ptr)) group = GROUP(*ptr); ptr += 2; } ipaccess[ips].group = group; if (strcmp(ptr, ALLIP4) == 0) ipaccess[ips].ip = ipaccess[ips].mask = 0; else { slash = strchr(ptr, '/'); if (!slash) ipaccess[ips].mask = 0xffffffff; else { *(slash++) = '\0'; mask = atoi(slash); if (mask < 1 || mask > 32) goto popipo; // skip invalid/zero ipaccess[ips].mask = 0; while (mask-- >= 0) { octet = 1 << (mask % 8); ipaccess[ips].mask |= (octet << (24 - (8 * (mask >> 3)))); } } ipaccess[ips].ip = 0; // missing default to '.0' for (i = 0; ptr && (i < 4); i++) { dot = strchr(ptr, '.'); if (dot) *(dot++) = '\0'; octet = atoi(ptr); if (octet < 0 || octet > 0xff) goto popipo; // skip invalid ipaccess[ips].ip |= (octet << (24 - (i * 8))); ptr = dot; } ipaccess[ips].ip &= ipaccess[ips].mask; } ips++; popipo: ptr = comma; } free(buf); } static void *quit_thread(__maybe_unused void *userdata) { RenameThread("rpc_quit"); // allow thread creator to finish whatever it's doing mutex_lock(&quit_restart_lock); mutex_unlock(&quit_restart_lock); if (opt_debug) applog(LOG_DEBUG, "API: killing BFGMiner"); kill_work(); return NULL; } static void *restart_thread(__maybe_unused void *userdata) { RenameThread("rpc_restart"); // allow thread creator to finish whatever it's doing mutex_lock(&quit_restart_lock); mutex_unlock(&quit_restart_lock); if (opt_debug) applog(LOG_DEBUG, "API: restarting BFGMiner"); app_restart(); return NULL; } static bool check_connect(struct sockaddr_in *cli, char **connectaddr, char *group) { bool addrok = false; int i; *connectaddr = inet_ntoa(cli->sin_addr); *group = NOPRIVGROUP; if (opt_api_allow) { int client_ip = htonl(cli->sin_addr.s_addr); for (i = 0; i < ips; i++) { if ((client_ip & ipaccess[i].mask) == ipaccess[i].ip) { addrok = true; *group = ipaccess[i].group; break; } } } else { if (opt_api_network) addrok = true; else addrok = (strcmp(*connectaddr, localaddr) == 0); } return addrok; } static void mcast() { struct sockaddr_in listen; struct ip_mreq grp; struct sockaddr_in came_from; time_t bindstart; const char *binderror; SOCKETTYPE mcast_sock; SOCKETTYPE reply_sock; socklen_t came_from_siz; char *connectaddr; ssize_t rep; int bound; int count; int reply_port; bool addrok; char group; 
char expect[] = "cgminer-"; // first 8 bytes constant char *expect_code; size_t expect_code_len; char buf[1024]; char replybuf[1024]; memset(&grp, 0, sizeof(grp)); grp.imr_multiaddr.s_addr = inet_addr(opt_api_mcast_addr); if (grp.imr_multiaddr.s_addr == INADDR_NONE) quit(1, "Invalid Multicast Address"); grp.imr_interface.s_addr = INADDR_ANY; mcast_sock = socket(AF_INET, SOCK_DGRAM, 0); int optval = 1; if (SOCKETFAIL(setsockopt(mcast_sock, SOL_SOCKET, SO_REUSEADDR, (void *)(&optval), sizeof(optval)))) { applog(LOG_ERR, "API mcast setsockopt SO_REUSEADDR failed (%s)%s", SOCKERRMSG, MUNAVAILABLE); goto die; } memset(&listen, 0, sizeof(listen)); listen.sin_family = AF_INET; listen.sin_addr.s_addr = INADDR_ANY; listen.sin_port = htons(opt_api_mcast_port); // try for more than 1 minute ... in case the old one hasn't completely gone yet bound = 0; bindstart = time(NULL); while (bound == 0) { if (SOCKETFAIL(bind(mcast_sock, (struct sockaddr *)(&listen), sizeof(listen)))) { binderror = SOCKERRMSG; if ((time(NULL) - bindstart) > 61) break; else cgsleep_ms(30000); } else bound = 1; } if (bound == 0) { applog(LOG_ERR, "API mcast bind to port %d failed (%s)%s", opt_api_port, binderror, MUNAVAILABLE); goto die; } if (SOCKETFAIL(setsockopt(mcast_sock, IPPROTO_IP, IP_ADD_MEMBERSHIP, (void *)(&grp), sizeof(grp)))) { applog(LOG_ERR, "API mcast join failed (%s)%s", SOCKERRMSG, MUNAVAILABLE); goto die; } expect_code_len = sizeof(expect) + strlen(opt_api_mcast_code); expect_code = malloc(expect_code_len+1); if (!expect_code) quit(1, "Failed to malloc mcast expect_code"); snprintf(expect_code, expect_code_len+1, "%s%s-", expect, opt_api_mcast_code); count = 0; while (80085) { cgsleep_ms(1000); count++; came_from_siz = sizeof(came_from); if (SOCKETFAIL(rep = recvfrom(mcast_sock, buf, sizeof(buf) - 1, 0, (struct sockaddr *)(&came_from), &came_from_siz))) { applog(LOG_DEBUG, "API mcast failed count=%d (%s) (%d)", count, SOCKERRMSG, (int)mcast_sock); continue; } addrok = check_connect(&came_from, &connectaddr, &group); applog(LOG_DEBUG, "API mcast from %s - %s", connectaddr, addrok ? 
"Accepted" : "Ignored"); if (!addrok) continue; buf[rep] = '\0'; if (rep > 0 && buf[rep-1] == '\n') buf[--rep] = '\0'; applog(LOG_DEBUG, "API mcast request rep=%d (%s) from %s:%d", (int)rep, buf, inet_ntoa(came_from.sin_addr), ntohs(came_from.sin_port)); if ((size_t)rep > expect_code_len && memcmp(buf, expect_code, expect_code_len) == 0) { reply_port = atoi(&buf[expect_code_len]); if (reply_port < 1 || reply_port > 65535) { applog(LOG_DEBUG, "API mcast request ignored - invalid port (%s)", &buf[expect_code_len]); } else { applog(LOG_DEBUG, "API mcast request OK port %s=%d", &buf[expect_code_len], reply_port); came_from.sin_port = htons(reply_port); reply_sock = socket(AF_INET, SOCK_DGRAM, 0); snprintf(replybuf, sizeof(replybuf), "cgm-%s-%d-%s", opt_api_mcast_code, opt_api_port, opt_api_mcast_des); rep = sendto(reply_sock, replybuf, strlen(replybuf)+1, 0, (struct sockaddr *)(&came_from), sizeof(came_from)); if (SOCKETFAIL(rep)) { applog(LOG_DEBUG, "API mcast send reply failed (%s) (%d)", SOCKERRMSG, (int)reply_sock); } else { applog(LOG_DEBUG, "API mcast send reply (%s) succeeded (%d) (%d)", replybuf, (int)rep, (int)reply_sock); } CLOSESOCKET(reply_sock); } } else applog(LOG_DEBUG, "API mcast request was no good"); } die: CLOSESOCKET(mcast_sock); } static void *mcast_thread(void *userdata) { pthread_detach(pthread_self()); pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); RenameThread("api_mcast"); mcast(); return NULL; } void mcast_init() { struct thr_info *thr; thr = calloc(1, sizeof(*thr)); if (!thr) quit(1, "Failed to calloc mcast thr"); if (thr_info_create(thr, NULL, mcast_thread, thr)) quit(1, "API mcast thread create failed"); } void api(int api_thr_id) { struct io_data *io_data; struct thr_info bye_thr; char buf[TMPBUFSIZ]; char param_buf[TMPBUFSIZ]; SOCKETTYPE c; int n, bound; char *connectaddr; const char *binderror; struct timeval bindstart; short int port = opt_api_port; struct sockaddr_in serv; struct sockaddr_in cli; socklen_t clisiz; char cmdbuf[100]; char *cmd; char *param; bool addrok; char group; json_error_t json_err; json_t *json_config; json_t *json_val; bool isjson; bool did; int i; SOCKETTYPE *apisock; if (!opt_api_listen) { applog(LOG_DEBUG, "API not running%s", UNAVAILABLE); return; } apisock = malloc(sizeof(*apisock)); *apisock = INVSOCK; rpc_io_data = io_data = sock_io_new(); mutex_init(&quit_restart_lock); pthread_cleanup_push(tidyup, (void *)apisock); my_thr_id = api_thr_id; setup_groups(); if (opt_api_allow) { setup_ipaccess(); if (ips == 0) { applog(LOG_WARNING, "API not running (no valid IPs specified)%s", UNAVAILABLE); return; } } *apisock = socket(AF_INET, SOCK_STREAM, 0); if (*apisock == INVSOCK) { applog(LOG_ERR, "API1 initialisation failed (%s)%s", SOCKERRMSG, UNAVAILABLE); return; } memset(&serv, 0, sizeof(serv)); serv.sin_family = AF_INET; if (!opt_api_allow && !opt_api_network) { serv.sin_addr.s_addr = inet_addr(localaddr); if (serv.sin_addr.s_addr == (in_addr_t)INVINETADDR) { applog(LOG_ERR, "API2 initialisation failed (%s)%s", SOCKERRMSG, UNAVAILABLE); return; } } serv.sin_port = htons(port); #ifndef WIN32 // On linux with SO_REUSEADDR, bind will get the port if the previous // socket is closed (even if it is still in TIME_WAIT) but fail if // another program has it open - which is what we want int optval = 1; // If it doesn't work, we don't really care - just show a debug message if (SOCKETFAIL(setsockopt(*apisock, SOL_SOCKET, SO_REUSEADDR, (void *)(&optval), sizeof(optval)))) applog(LOG_DEBUG, "API setsockopt SO_REUSEADDR failed (ignored): 
%s", SOCKERRMSG); #else // On windows a 2nd program can bind to a port>1024 already in use unless // SO_EXCLUSIVEADDRUSE is used - however then the bind to a closed port // in TIME_WAIT will fail until the timeout - so we leave the options alone #endif // try for more than 1 minute ... in case the old one hasn't completely gone yet bound = 0; cgtime(&bindstart); while (bound == 0) { if (SOCKETFAIL(bind(*apisock, (struct sockaddr *)(&serv), sizeof(serv)))) { binderror = SOCKERRMSG; if (timer_elapsed(&bindstart, NULL) > 61) break; else { applog(LOG_WARNING, "API bind to port %d failed - trying again in 30sec", port); cgsleep_ms(30000); } } else bound = 1; } if (bound == 0) { applog(LOG_ERR, "API bind to port %d failed (%s)%s", port, binderror, UNAVAILABLE); return; } if (SOCKETFAIL(listen(*apisock, QUEUE))) { applog(LOG_ERR, "API3 initialisation failed (%s)%s", SOCKERRMSG, UNAVAILABLE); CLOSESOCKET(*apisock); return; } if (opt_api_allow) applog(LOG_WARNING, "API running in IP access mode on port %d", port); else { if (opt_api_network) applog(LOG_WARNING, "API running in UNRESTRICTED read access mode on port %d", port); else applog(LOG_WARNING, "API running in local read access mode on port %d", port); } if (opt_api_mcast) mcast_init(); while (!bye) { clisiz = sizeof(cli); if (SOCKETFAIL(c = accept(*apisock, (struct sockaddr *)(&cli), &clisiz))) { applog(LOG_ERR, "API failed (%s)%s", SOCKERRMSG, UNAVAILABLE); goto die; } addrok = check_connect(&cli, &connectaddr, &group); applog(LOG_DEBUG, "API: connection from %s - %s", connectaddr, addrok ? "Accepted" : "Ignored"); if (addrok) { n = recv(c, &buf[0], TMPBUFSIZ-1, 0); if (SOCKETFAIL(n)) buf[0] = '\0'; else buf[n] = '\0'; if (opt_debug) { if (SOCKETFAIL(n)) applog(LOG_DEBUG, "API: recv failed: %s", SOCKERRMSG); else applog(LOG_DEBUG, "API: recv command: (%d) '%s'", n, buf); } if (!SOCKETFAIL(n)) { // the time of the request in now when = time(NULL); io_reinit(io_data); io_data->sock = c; did = false; if (*buf != ISJSON) { isjson = false; param = strchr(buf, SEPARATOR); if (param != NULL) *(param++) = '\0'; cmd = buf; } else { isjson = true; param = NULL; #if JANSSON_MAJOR_VERSION > 2 || (JANSSON_MAJOR_VERSION == 2 && JANSSON_MINOR_VERSION > 0) json_config = json_loadb(buf, n, 0, &json_err); #elif JANSSON_MAJOR_VERSION > 1 json_config = json_loads(buf, 0, &json_err); #else json_config = json_loads(buf, &json_err); #endif if (!json_is_object(json_config)) { message(io_data, MSG_INVJSON, 0, NULL, isjson); send_result(io_data, c, isjson); did = true; } else { json_val = json_object_get(json_config, JSON_COMMAND); if (json_val == NULL) { message(io_data, MSG_MISCMD, 0, NULL, isjson); send_result(io_data, c, isjson); did = true; } else { if (!json_is_string(json_val)) { message(io_data, MSG_INVCMD, 0, NULL, isjson); send_result(io_data, c, isjson); did = true; } else { cmd = (char *)json_string_value(json_val); json_val = json_object_get(json_config, JSON_PARAMETER); if (json_is_string(json_val)) param = (char *)json_string_value(json_val); else if (json_is_integer(json_val)) { sprintf(param_buf, "%d", (int)json_integer_value(json_val)); param = param_buf; } else if (json_is_real(json_val)) { sprintf(param_buf, "%f", (double)json_real_value(json_val)); param = param_buf; } } } } } if (!did) for (i = 0; cmds[i].name != NULL; i++) { if (strcmp(cmd, cmds[i].name) == 0) { sprintf(cmdbuf, "|%s|", cmd); if (ISPRIVGROUP(group) || strstr(COMMANDS(group), cmdbuf)) { per_proc = !strncmp(cmds[i].name, "proc", 4); (cmds[i].func)(io_data, c, param, isjson, 
group); } else { message(io_data, MSG_ACCDENY, 0, cmds[i].name, isjson); applog(LOG_DEBUG, "API: access denied to '%s' for '%s' command", connectaddr, cmds[i].name); } send_result(io_data, c, isjson); did = true; break; } } if (isjson) json_decref(json_config); if (!did) { message(io_data, MSG_INVCMD, 0, NULL, isjson); send_result(io_data, c, isjson); } } } CLOSESOCKET(c); } die: /* Blank line fix for older compilers since pthread_cleanup_pop is a * macro that gets confused by a label existing immediately before it */ ; pthread_cleanup_pop(true); if (opt_debug) applog(LOG_DEBUG, "API: terminating due to: %s", do_a_quit ? "QUIT" : (do_a_restart ? "RESTART" : (bye ? "BYE" : "UNKNOWN!"))); mutex_lock(&quit_restart_lock); if (do_a_restart) { if (thr_info_create(&bye_thr, NULL, restart_thread, &bye_thr)) { mutex_unlock(&quit_restart_lock); quit(1, "API failed to initiate a restart - aborting"); } pthread_detach(bye_thr.pth); } else if (do_a_quit) { if (thr_info_create(&bye_thr, NULL, quit_thread, &bye_thr)) { mutex_unlock(&quit_restart_lock); quit(1, "API failed to initiate a clean quit - aborting"); } pthread_detach(bye_thr.pth); } mutex_unlock(&quit_restart_lock); } bfgminer-bfgminer-3.10.0/arg-nonnull.h000066400000000000000000000023001226556647300175730ustar00rootroot00000000000000/* A C macro for declaring that specific arguments must not be NULL. Copyright (C) 2009-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* _GL_ARG_NONNULL((n,...,m)) tells the compiler and static analyzer tools that the values passed as arguments n, ..., m must be non-NULL pointers. n = 1 stands for the first argument, n = 2 for the second argument etc. */ #ifndef _GL_ARG_NONNULL # if (__GNUC__ == 3 && __GNUC_MINOR__ >= 3) || __GNUC__ > 3 # define _GL_ARG_NONNULL(params) __attribute__ ((__nonnull__ params)) # else # define _GL_ARG_NONNULL(params) # endif #endif bfgminer-bfgminer-3.10.0/autogen.sh000077500000000000000000000006761226556647300172050ustar00rootroot00000000000000#!/bin/sh -e # Written by Luke Dashjr in 2012 # This program is released under the terms of the Creative Commons "CC0 1.0 Universal" license and/or copyright waiver. bs_dir="$(dirname "$0")" if test -z "$NOSUBMODULES" ; then echo 'Getting submodules...' ( cd "${bs_dir}" git submodule update --init ) fi echo 'Running autoreconf -if...' ( cd "${bs_dir}" rm -rf autom4te.cache rm -f aclocal.m4 ltmain.sh autoreconf -if ${AC_FLAGS} ) bfgminer-bfgminer-3.10.0/avalonhost-raminst000077500000000000000000000032611226556647300207540ustar00rootroot00000000000000#!/bin/sh # Copyright 2013 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 3 of the License, or (at your option) any later # version. See COPYING for more details. 
set -e if test "$#" -lt 2; then echo "Usage: $0 " echo "Example: $0 testing bfgminer screen" echo "Installs to RAM, on an Avalon-host router" echo "Version can be 'stable', 'testing', or any 3-digit version; eg '3.1.0'" echo "Do NOT attempt to reverse (uninstall) except by rebooting" echo "Do NOT attempt to use this script more than once per boot" exit 1 fi die() { echo "$@" echo 'ABORTING' exit 1 } if ! grep TL-WR703N /proc/cpuinfo; then die 'This doesn'\''t seem to be an Avalon host system' fi if test "$USER" != "root"; then die "Must be run as root" fi echo "WARNING: If anything fails other than opkg and crontab, reboot ASAP" source /lib/functions.sh mkdir -p /tmp/root/.oldroot mount -o noatime,lowerdir=/rom,upperdir=/tmp/root -t overlayfs "overlayfs:/tmp/root" /mnt pivot /mnt /.oldroot mount -o noatime,move /.oldroot/rom /rom sed -i 's/\(^option[[:space:]]\+overlay_root[[:space:]]\+\).*//;T;d' /etc/opkg.conf { echo 'option overlay_root /' echo "src/gz bfgminer http://luke.dashjr.org/programs/bitcoin/files/bfgminer/$1/openwrt/12.09/ar71xx" } >> /etc/opkg.conf shift set +e opkg update opkg install "$@" crontab -r # disabled cgminer-monitor set -e mount -o noatime,lowerdir=/,upperdir=/overlay -t overlayfs "overlayfs:/overlay" /mnt pivot /mnt /.oldroot mount -o noatime,move /.oldroot/rom /rom bfgminer-bfgminer-3.10.0/bench_block.h000066400000000000000000000074721226556647300176070ustar00rootroot00000000000000#if !defined(__BENCH_BLOCK_H__) #define __BENCH_BLOCK_H__ 1 // Random work pulled from a pool #define CGMINER_BENCHMARK_BLOCK \ 0x00, 0x00, 0x00, 0x01, 0x20, 0x00, 0xD8, 0x07, 0x17, 0xC9, 0x13, 0x6F, 0xDC, 0xBE, 0xDE, 0xB7, \ 0xB2, 0x14, 0xEF, 0xD1, 0x72, 0x7F, 0xA3, 0x72, 0xB2, 0x5D, 0x88, 0xF0, 0x00, 0x00, 0x05, 0xAA, \ 0x00, 0x00, 0x00, 0x00, 0x92, 0x8B, 0x4C, 0x77, 0xF5, 0xB2, 0xE6, 0x56, 0x96, 0x27, 0xE0, 0x66, \ 0x3C, 0x5B, 0xDD, 0xDC, 0x88, 0x6A, 0x7D, 0x7C, 0x7B, 0x8C, 0xE4, 0x92, 0x38, 0x92, 0x58, 0x2E, \ 0x18, 0x4D, 0x95, 0x9E, 0x4E, 0x44, 0xF1, 0x5F, 0x1A, 0x08, 0xE1, 0xE5, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x02, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, \ 0x86, 0x7E, 0x3A, 0xAF, 0x37, 0x83, 0xAF, 0xA0, 0xB5, 0x33, 0x2C, 0x28, 0xED, 0xA9, 0x89, 0x3E, \ 0x0A, 0xB6, 0x46, 0x81, 0xC2, 0x71, 0x4F, 0x34, 0x5A, 0x74, 0x89, 0x0E, 0x2B, 0x04, 0xB3, 0x16, \ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, \ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA0, 0xF6, 0x09, 0x02, 0x00, 0x00, 0x00, 0x00, \ 0x55, 0xF1, 0x44, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x79, 0x63, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ #endif // !defined(__BENCH_BLOCK_H__) bfgminer-bfgminer-3.10.0/binloader.c000066400000000000000000000123651226556647300173050ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include "deviceapi.h" #include "logging.h" #include "miner.h" #include "util.h" #define bailout(...) do { \ applog(__VA_ARGS__); \ return NULL; \ } while(0) #define check_magic(L) do { \ if (1 != fread(buf, 1, 1, f)) \ bailout(LOG_ERR, "%s: Error reading bitstream ('%c')", \ repr, L); \ if (buf[0] != L) \ bailout(LOG_ERR, "%s: Firmware has wrong magic ('%c')", \ repr, L); \ } while(0) #define read_str(eng) do { \ if (1 != fread(buf, 2, 1, f)) \ bailout(LOG_ERR, "%s: Error reading bitstream (" eng " len)", \ repr); \ len = (ubuf[0] << 8) | ubuf[1]; \ if (len >= sizeof(buf)) \ bailout(LOG_ERR, "%s: Firmware " eng " too long", \ repr); \ if (1 != fread(buf, len, 1, f)) \ bailout(LOG_ERR, "%s: Error reading bitstream (" eng ")", \ repr); \ buf[len] = '\0'; \ } while(0) void _bitstream_not_found(const char *repr, const char *fn) { applog(LOG_ERR, "ERROR: Unable to load '%s', required for %s to work!", fn, repr); applog(LOG_ERR, "ERROR: Please read README.FPGA for instructions"); } FILE *open_xilinx_bitstream(const char *dname, const char *repr, const char *fwfile, unsigned long *out_len) { char buf[0x100]; unsigned char *ubuf = (unsigned char*)buf; unsigned long len; char *p; FILE *f = open_bitstream(dname, fwfile); if (!f) { _bitstream_not_found(repr, fwfile); return NULL; } if (1 != fread(buf, 2, 1, f)) bailout(LOG_ERR, "%s: Error reading bitstream (magic)", repr); if (buf[0] || buf[1] != 9) bailout(LOG_ERR, "%s: Firmware has wrong magic (9)", repr); if (-1 == fseek(f, 11, SEEK_CUR)) bailout(LOG_ERR, "%s: Firmware seek failed", repr); check_magic('a'); read_str("design name"); applog(LOG_DEBUG, "%s: Firmware file %s info:", repr, fwfile); applog(LOG_DEBUG, " Design name: %s", buf); p = strrchr(buf, ';') ?: buf; p = strrchr(buf, '=') ?: p; if (p[0] == '=') ++p; unsigned long fwusercode = (unsigned long)strtoll(p, &p, 16); if (p[0] != '\0') bailout(LOG_ERR, "%s: Bad usercode in 
bitstream file", repr); if (fwusercode == 0xffffffff) bailout(LOG_ERR, "%s: Firmware doesn't support user code", repr); applog(LOG_DEBUG, " Version: %u, build %u", (unsigned)((fwusercode >> 8) & 0xff), (unsigned)(fwusercode & 0xff)); check_magic('b'); read_str("part number"); applog(LOG_DEBUG, " Part number: %s", buf); check_magic('c'); read_str("build date"); applog(LOG_DEBUG, " Build date: %s", buf); check_magic('d'); read_str("build time"); applog(LOG_DEBUG, " Build time: %s", buf); check_magic('e'); if (1 != fread(buf, 4, 1, f)) bailout(LOG_ERR, "%s: Error reading bitstream (data len)", repr); len = ((unsigned long)ubuf[0] << 24) | ((unsigned long)ubuf[1] << 16) | (ubuf[2] << 8) | ubuf[3]; applog(LOG_DEBUG, " Bitstream size: %lu", len); *out_len = len; return f; } bool load_bitstream_intelhex(bytes_t *rv, const char *dname, const char *repr, const char *fn) { char buf[0x100]; size_t sz; uint8_t xsz, xrt; uint16_t xaddr; FILE *F = open_bitstream(dname, fn); if (!F) return false; while (!feof(F)) { if (unlikely(ferror(F))) { applog(LOG_ERR, "Error reading '%s'", fn); goto ihxerr; } if (!fgets(buf, sizeof(buf), F)) goto ihxerr; if (unlikely(buf[0] != ':')) goto ihxerr; if (unlikely(!( hex2bin(&xsz, &buf[1], 1) && hex2bin((unsigned char*)&xaddr, &buf[3], 2) && hex2bin(&xrt, &buf[7], 1) ))) { applog(LOG_ERR, "Error parsing in '%s'", fn); goto ihxerr; } switch (xrt) { case 0: // data break; case 1: // EOF fclose(F); return true; default: applog(LOG_ERR, "Unsupported record type in '%s'", fn); goto ihxerr; } xaddr = be16toh(xaddr); sz = bytes_len(rv); bytes_resize(rv, xaddr + xsz); if (sz < xaddr) memset(&bytes_buf(rv)[sz], 0xff, xaddr - sz); if (unlikely(!(hex2bin(&bytes_buf(rv)[xaddr], &buf[9], xsz)))) { applog(LOG_ERR, "Error parsing data in '%s'", fn); goto ihxerr; } // TODO: checksum } ihxerr: fclose(F); bytes_reset(rv); return false; } bool load_bitstream_bytes(bytes_t *rv, const char *dname, const char *repr, const char *fileprefix) { FILE *F; size_t fplen = strlen(fileprefix); char fnbuf[fplen + 4 + 1]; int e; bytes_reset(rv); memcpy(fnbuf, fileprefix, fplen); strcpy(&fnbuf[fplen], ".bin"); F = open_bitstream(dname, fnbuf); if (F) { char buf[0x100]; size_t sz; while ( (sz = fread(buf, 1, sizeof(buf), F)) ) bytes_append(rv, buf, sz); e = ferror(F); fclose(F); if (unlikely(e)) { applog(LOG_ERR, "Error reading '%s'", fnbuf); bytes_reset(rv); } else return true; } strcpy(&fnbuf[fplen], ".ihx"); if (load_bitstream_intelhex(rv, dname, repr, fnbuf)) return true; // TODO: Xilinx _bitstream_not_found(repr, fnbuf); return false; } bfgminer-bfgminer-3.10.0/binloader.h000066400000000000000000000010031226556647300172750ustar00rootroot00000000000000#ifndef BFG_BINLOADER_H #define BFG_BINLOADER_H #include #include #include "util.h" extern void _bitstream_not_found(const char *repr, const char *fn); extern FILE *open_xilinx_bitstream(const char *dname, const char *repr, const char *fwfile, unsigned long *out_len); extern bool load_bitstream_intelhex(bytes_t *out, const char *dname, const char *repr, const char *fn); extern bool load_bitstream_bytes(bytes_t *out, const char *dname, const char *repr, const char *fileprefix); #endif bfgminer-bfgminer-3.10.0/bitforce-firmware-flash.c000066400000000000000000000103451226556647300220440ustar00rootroot00000000000000/* * Copyright 2012 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at 
your option) * any later version. See COPYING for more details. */ #define _BSD_SOURCE #include #include #include #include #include #define BFL_FILE_MAGIC "BFLDATA" #define BFL_UPLOAD_MAGIC "NGH-STREAM" #define myassert(expr, n, ...) \ do { \ if (!(expr)) { \ fprintf(stderr, __VA_ARGS__); \ return n; \ } \ } while(0) #define ERRRESP(buf) buf, (buf[strlen(buf)-1] == '\n' ? "" : "\n") #define WAITFOROK(n, msg) \ do { \ myassert(fgets(buf, sizeof(buf), BFL), n, "Error reading response from " msg "\n"); \ myassert(!strcmp(buf, "OK\n"), n, "Invalid response from " msg ": %s%s", ERRRESP(buf)); \ } while(0) int main(int argc, char**argv) { myassert(argc == 3, 1, "Usage: %s \n", argv[0]); setbuf(stdout, NULL); // Check filename char *FWname = basename(strdup(argv[2])); size_t FWnameLen = strlen(FWname); myassert(FWnameLen <= 255, 0x0f, "Firmware filename '%s' is too long\n", FWname); uint8_t n8 = FWnameLen; // Open and check firmware file FILE *FW = fopen(argv[2], "r"); myassert(FW, 0x10, "Failed to open '%s' for reading\n", argv[2]); char buf[0x20]; myassert(1 == fread(buf, 7, 1, FW), 0x10, "Failed to read from '%s'\n", argv[2]); myassert(!memcmp(buf, BFL_FILE_MAGIC, sizeof(BFL_FILE_MAGIC)-1), 0x11, "'%s' doesn't look like a BFL firmware\n", argv[2]); myassert(!fseek(FW, 0, SEEK_END), 0x12, "Failed to find end of '%s'\n", argv[2]); long FWlen = ftell(FW); myassert(FWlen > 0, 0x12, "Couldn't get size of '%s'\n", argv[2]); myassert(!fseek(FW, 7, SEEK_SET), 0x12, "Failed to rewind firmware file after getting size\n"); FWlen -= 7; printf("Firmware file looks OK :)\n"); // Open device FILE *BFL = fopen(argv[1], "r+"); myassert(BFL, 0x20, "Failed to open '%s' for read/write\n", argv[1]); myassert(!setvbuf(BFL, NULL, _IOFBF, 1032), 0x21, "Failed to setup buffer for device"); // ZAX: Start firmware upload printf("Starting firmware upload... "); myassert(1 == fwrite("ZAX", 3, 1, BFL), 0x22, "Failed to issue ZAX command\n"); WAITFOROK(0x22, "ZAX"); // Firmware upload header myassert(1 == fwrite(BFL_UPLOAD_MAGIC, sizeof(BFL_UPLOAD_MAGIC)-1, 1, BFL), 0x23, "Failed to send firmware upload header (magic)\n"); uint32_t n32 = htonl(FWlen - FWlen / 6); myassert(1 == fwrite(&n32, sizeof(n32), 1, BFL), 0x23, "Failed to send firmware upload header (size)\n"); myassert(1 == fwrite("\0\0", 2 , 1, BFL), 0x23, "Failed to send firmware upload header (padding 1)\n"); myassert(1 == fwrite(&n8, sizeof(n8) , 1, BFL), 0x23, "Failed to send firmware upload header (filename length)\n"); myassert(1 == fwrite(FWname, n8 , 1, BFL), 0x23, "Failed to send firmware upload header (filename)\n"); myassert(1 == fwrite("\0>>>>>>>>", 9 , 1, BFL), 0x23, "Failed to send firmware upload header (padding 2)\n"); WAITFOROK(0x23, "firmware upload header"); printf("OK, sending...\n"); // Actual firmware upload long i, j; for (i = 0, j = 0; i < FWlen; ++i) { myassert(1 == fread(&n8, sizeof(n8), 1, FW), 0x30, "Error reading data from firmware file\n"); if (5 == i % 6) continue; n8 ^= 0x2f; myassert(1 == fwrite(&n8, sizeof(n8), 1, BFL), 0x31, "Error sending data to device\n"); if (!(++j % 0x400)) { myassert(1 == fwrite(">>>>>>>>", 8, 1, BFL), 0x32, "Error sending block-finish to device\n"); printf("\r%5.2f%% complete", (double)i * 100. 
/ (double)FWlen); WAITFOROK(0x32, "block-finish"); } } printf("\r100%% complete :)\n"); myassert(1 == fwrite(">>>>>>>>", 8, 1, BFL), 0x3f, "Error sending upload-finished to device\n"); myassert(fgets(buf, sizeof(buf), BFL), 0x3f, "Error reading response from upload-finished\n"); myassert(!strcmp(buf, "DONE\n"), 0x3f, "Invalid response from upload-finished: %s%s", ERRRESP(buf)); // ZBX: Finish programming printf("Waiting for device... "); myassert(1 == fwrite("ZBX", 3, 1, BFL), 0x40, "Failed to issue ZBX command\n"); WAITFOROK(0x40, "ZBX"); printf("All done! Try mining to test the flash succeeded.\n"); return 0; } bfgminer-bfgminer-3.10.0/bitstreams/000077500000000000000000000000001226556647300173505ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/bitstreams/.gitignore000066400000000000000000000000001226556647300213260ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/c++defs.h000066400000000000000000000267531226556647300165730ustar00rootroot00000000000000/* C++ compatible function declaration macros. Copyright (C) 2010-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #ifndef _GL_CXXDEFS_H #define _GL_CXXDEFS_H /* The three most frequent use cases of these macros are: * For providing a substitute for a function that is missing on some platforms, but is declared and works fine on the platforms on which it exists: #if @GNULIB_FOO@ # if !@HAVE_FOO@ _GL_FUNCDECL_SYS (foo, ...); # endif _GL_CXXALIAS_SYS (foo, ...); _GL_CXXALIASWARN (foo); #elif defined GNULIB_POSIXCHECK ... #endif * For providing a replacement for a function that exists on all platforms, but is broken/insufficient and needs to be replaced on some platforms: #if @GNULIB_FOO@ # if @REPLACE_FOO@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef foo # define foo rpl_foo # endif _GL_FUNCDECL_RPL (foo, ...); _GL_CXXALIAS_RPL (foo, ...); # else _GL_CXXALIAS_SYS (foo, ...); # endif _GL_CXXALIASWARN (foo); #elif defined GNULIB_POSIXCHECK ... #endif * For providing a replacement for a function that exists on some platforms but is broken/insufficient and needs to be replaced on some of them and is additionally either missing or undeclared on some other platforms: #if @GNULIB_FOO@ # if @REPLACE_FOO@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef foo # define foo rpl_foo # endif _GL_FUNCDECL_RPL (foo, ...); _GL_CXXALIAS_RPL (foo, ...); # else # if !@HAVE_FOO@ or if !@HAVE_DECL_FOO@ _GL_FUNCDECL_SYS (foo, ...); # endif _GL_CXXALIAS_SYS (foo, ...); # endif _GL_CXXALIASWARN (foo); #elif defined GNULIB_POSIXCHECK ... #endif */ /* _GL_EXTERN_C declaration; performs the declaration with C linkage. */ #if defined __cplusplus # define _GL_EXTERN_C extern "C" #else # define _GL_EXTERN_C extern #endif /* _GL_FUNCDECL_RPL (func, rettype, parameters_and_attributes); declares a replacement function, named rpl_func, with the given prototype, consisting of return type, parameters, and attributes. 
Example: _GL_FUNCDECL_RPL (open, int, (const char *filename, int flags, ...) _GL_ARG_NONNULL ((1))); */ #define _GL_FUNCDECL_RPL(func,rettype,parameters_and_attributes) \ _GL_FUNCDECL_RPL_1 (rpl_##func, rettype, parameters_and_attributes) #define _GL_FUNCDECL_RPL_1(rpl_func,rettype,parameters_and_attributes) \ _GL_EXTERN_C rettype rpl_func parameters_and_attributes /* _GL_FUNCDECL_SYS (func, rettype, parameters_and_attributes); declares the system function, named func, with the given prototype, consisting of return type, parameters, and attributes. Example: _GL_FUNCDECL_SYS (open, int, (const char *filename, int flags, ...) _GL_ARG_NONNULL ((1))); */ #define _GL_FUNCDECL_SYS(func,rettype,parameters_and_attributes) \ _GL_EXTERN_C rettype func parameters_and_attributes /* _GL_CXXALIAS_RPL (func, rettype, parameters); declares a C++ alias called GNULIB_NAMESPACE::func that redirects to rpl_func, if GNULIB_NAMESPACE is defined. Example: _GL_CXXALIAS_RPL (open, int, (const char *filename, int flags, ...)); */ #define _GL_CXXALIAS_RPL(func,rettype,parameters) \ _GL_CXXALIAS_RPL_1 (func, rpl_##func, rettype, parameters) #if defined __cplusplus && defined GNULIB_NAMESPACE # define _GL_CXXALIAS_RPL_1(func,rpl_func,rettype,parameters) \ namespace GNULIB_NAMESPACE \ { \ rettype (*const func) parameters = ::rpl_func; \ } \ _GL_EXTERN_C int _gl_cxxalias_dummy #else # define _GL_CXXALIAS_RPL_1(func,rpl_func,rettype,parameters) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIAS_RPL_CAST_1 (func, rpl_func, rettype, parameters); is like _GL_CXXALIAS_RPL_1 (func, rpl_func, rettype, parameters); except that the C function rpl_func may have a slightly different declaration. A cast is used to silence the "invalid conversion" error that would otherwise occur. */ #if defined __cplusplus && defined GNULIB_NAMESPACE # define _GL_CXXALIAS_RPL_CAST_1(func,rpl_func,rettype,parameters) \ namespace GNULIB_NAMESPACE \ { \ rettype (*const func) parameters = \ reinterpret_cast(::rpl_func); \ } \ _GL_EXTERN_C int _gl_cxxalias_dummy #else # define _GL_CXXALIAS_RPL_CAST_1(func,rpl_func,rettype,parameters) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIAS_SYS (func, rettype, parameters); declares a C++ alias called GNULIB_NAMESPACE::func that redirects to the system provided function func, if GNULIB_NAMESPACE is defined. Example: _GL_CXXALIAS_SYS (open, int, (const char *filename, int flags, ...)); */ #if defined __cplusplus && defined GNULIB_NAMESPACE /* If we were to write rettype (*const func) parameters = ::func; like above in _GL_CXXALIAS_RPL_1, the compiler could optimize calls better (remove an indirection through a 'static' pointer variable), but then the _GL_CXXALIASWARN macro below would cause a warning not only for uses of ::func but also for uses of GNULIB_NAMESPACE::func. */ # define _GL_CXXALIAS_SYS(func,rettype,parameters) \ namespace GNULIB_NAMESPACE \ { \ static rettype (*func) parameters = ::func; \ } \ _GL_EXTERN_C int _gl_cxxalias_dummy #else # define _GL_CXXALIAS_SYS(func,rettype,parameters) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIAS_SYS_CAST (func, rettype, parameters); is like _GL_CXXALIAS_SYS (func, rettype, parameters); except that the C function func may have a slightly different declaration. A cast is used to silence the "invalid conversion" error that would otherwise occur. 
*/ #if defined __cplusplus && defined GNULIB_NAMESPACE # define _GL_CXXALIAS_SYS_CAST(func,rettype,parameters) \ namespace GNULIB_NAMESPACE \ { \ static rettype (*func) parameters = \ reinterpret_cast(::func); \ } \ _GL_EXTERN_C int _gl_cxxalias_dummy #else # define _GL_CXXALIAS_SYS_CAST(func,rettype,parameters) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIAS_SYS_CAST2 (func, rettype, parameters, rettype2, parameters2); is like _GL_CXXALIAS_SYS (func, rettype, parameters); except that the C function is picked among a set of overloaded functions, namely the one with rettype2 and parameters2. Two consecutive casts are used to silence the "cannot find a match" and "invalid conversion" errors that would otherwise occur. */ #if defined __cplusplus && defined GNULIB_NAMESPACE /* The outer cast must be a reinterpret_cast. The inner cast: When the function is defined as a set of overloaded functions, it works as a static_cast<>, choosing the designated variant. When the function is defined as a single variant, it works as a reinterpret_cast<>. The parenthesized cast syntax works both ways. */ # define _GL_CXXALIAS_SYS_CAST2(func,rettype,parameters,rettype2,parameters2) \ namespace GNULIB_NAMESPACE \ { \ static rettype (*func) parameters = \ reinterpret_cast( \ (rettype2(*)parameters2)(::func)); \ } \ _GL_EXTERN_C int _gl_cxxalias_dummy #else # define _GL_CXXALIAS_SYS_CAST2(func,rettype,parameters,rettype2,parameters2) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIASWARN (func); causes a warning to be emitted when ::func is used but not when GNULIB_NAMESPACE::func is used. func must be defined without overloaded variants. */ #if defined __cplusplus && defined GNULIB_NAMESPACE # define _GL_CXXALIASWARN(func) \ _GL_CXXALIASWARN_1 (func, GNULIB_NAMESPACE) # define _GL_CXXALIASWARN_1(func,namespace) \ _GL_CXXALIASWARN_2 (func, namespace) /* To work around GCC bug , we enable the warning only when not optimizing. */ # if !__OPTIMIZE__ # define _GL_CXXALIASWARN_2(func,namespace) \ _GL_WARN_ON_USE (func, \ "The symbol ::" #func " refers to the system function. " \ "Use " #namespace "::" #func " instead.") # elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING # define _GL_CXXALIASWARN_2(func,namespace) \ extern __typeof__ (func) func # else # define _GL_CXXALIASWARN_2(func,namespace) \ _GL_EXTERN_C int _gl_cxxalias_dummy # endif #else # define _GL_CXXALIASWARN(func) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif /* _GL_CXXALIASWARN1 (func, rettype, parameters_and_attributes); causes a warning to be emitted when the given overloaded variant of ::func is used but not when GNULIB_NAMESPACE::func is used. */ #if defined __cplusplus && defined GNULIB_NAMESPACE # define _GL_CXXALIASWARN1(func,rettype,parameters_and_attributes) \ _GL_CXXALIASWARN1_1 (func, rettype, parameters_and_attributes, \ GNULIB_NAMESPACE) # define _GL_CXXALIASWARN1_1(func,rettype,parameters_and_attributes,namespace) \ _GL_CXXALIASWARN1_2 (func, rettype, parameters_and_attributes, namespace) /* To work around GCC bug , we enable the warning only when not optimizing. */ # if !__OPTIMIZE__ # define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ _GL_WARN_ON_USE_CXX (func, rettype, parameters_and_attributes, \ "The symbol ::" #func " refers to the system function. 
" \ "Use " #namespace "::" #func " instead.") # elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING # define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ extern __typeof__ (func) func # else # define _GL_CXXALIASWARN1_2(func,rettype,parameters_and_attributes,namespace) \ _GL_EXTERN_C int _gl_cxxalias_dummy # endif #else # define _GL_CXXALIASWARN1(func,rettype,parameters_and_attributes) \ _GL_EXTERN_C int _gl_cxxalias_dummy #endif #endif /* _GL_CXXDEFS_H */ bfgminer-bfgminer-3.10.0/ccan/000077500000000000000000000000001226556647300160775ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/Makefile.am000066400000000000000000000003151226556647300201320ustar00rootroot00000000000000noinst_LIBRARIES = libccan.a libccan_a_SOURCES = compiler/compiler.h opt/helpers.c opt/opt.c opt/opt.h opt/parse.c opt/private.h opt/usage.c typesafe_cb/typesafe_cb.h libccan_a_CPPFLAGS = -I$(top_srcdir) bfgminer-bfgminer-3.10.0/ccan/compiler/000077500000000000000000000000001226556647300177115ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/compiler/LICENSE000066400000000000000000000167271226556647300207330ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. bfgminer-bfgminer-3.10.0/ccan/compiler/_info000066400000000000000000000035161226556647300207330ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) any * later version. See LICENSE for more details. */ #include #include #include "config.h" /** * compiler - macros for common compiler extensions * * Abstracts away some compiler hints. Currently these include: * - COLD * For functions not called in fast paths (aka. cold functions) * - PRINTF_FMT * For functions which take printf-style parameters. * - IDEMPOTENT * For functions which return the same value for same parameters. * - NEEDED * For functions and variables which must be emitted even if unused. * - UNNEEDED * For functions and variables which need not be emitted if unused. * - UNUSED * For parameters which are not used. * - IS_COMPILE_CONSTANT * For using different tradeoffs for compiletime vs runtime evaluation. * * License: LGPL (3 or any later version) * Author: Rusty Russell * * Example: * #include * #include * #include * * // Example of a (slow-path) logging function. * static int log_threshold = 2; * static void COLD PRINTF_FMT(2,3) * logger(int level, const char *fmt, ...) 
* { * va_list ap; * va_start(ap, fmt); * if (level >= log_threshold) * vfprintf(stderr, fmt, ap); * va_end(ap); * } * * int main(int argc, char *argv[]) * { * if (argc != 1) { * logger(3, "Don't want %i arguments!\n", argc-1); * return 1; * } * return 0; * } */ int main(int argc, char *argv[]) { /* Expect exactly one argument */ if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } bfgminer-bfgminer-3.10.0/ccan/compiler/compiler.h000066400000000000000000000133321226556647300216760ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) any * later version. See LICENSE for more details. */ #ifndef CCAN_COMPILER_H #define CCAN_COMPILER_H #include "config.h" #ifndef COLD #if HAVE_ATTRIBUTE_COLD /** * COLD - a function is unlikely to be called. * * Used to mark an unlikely code path and optimize appropriately. * It is usually used on logging or error routines. * * Example: * static void COLD moan(const char *reason) * { * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); * } */ #define COLD __attribute__((cold)) #else #define COLD #endif #endif #ifndef NORETURN #if HAVE_ATTRIBUTE_NORETURN /** * NORETURN - a function does not return * * Used to mark a function which exits; useful for suppressing warnings. * * Example: * static void NORETURN fail(const char *reason) * { * fprintf(stderr, "Error: %s (%s)\n", reason, strerror(errno)); * exit(1); * } */ #define NORETURN __attribute__((noreturn)) #else #define NORETURN #endif #endif #ifndef PRINTF_FMT #if HAVE_ATTRIBUTE_PRINTF /** * PRINTF_FMT - a function takes printf-style arguments * @nfmt: the 1-based number of the function's format argument. * @narg: the 1-based number of the function's first variable argument. * * This allows the compiler to check your parameters as it does for printf(). * * Example: * void PRINTF_FMT(2,3) my_printf(const char *prefix, const char *fmt, ...); */ #define PRINTF_FMT(nfmt, narg) \ __attribute__((format(__printf__, nfmt, narg))) #else #define PRINTF_FMT(nfmt, narg) #endif #endif #ifndef IDEMPOTENT #if HAVE_ATTRIBUTE_CONST /** * IDEMPOTENT - a function's return depends only on its argument * * This allows the compiler to assume that the function will return the exact * same value for the exact same arguments. This implies that the function * must not use global variables, or dereference pointer arguments. */ #define IDEMPOTENT __attribute__((const)) #else #define IDEMPOTENT #endif #endif #if HAVE_ATTRIBUTE_UNUSED #ifndef UNNEEDED /** * UNNEEDED - a variable/function may not be needed * * This suppresses warnings about unused variables or functions, but tells * the compiler that if it is unused it need not emit it into the source code. * * Example: * // With some preprocessor options, this is unnecessary. * static UNNEEDED int counter; * * // With some preprocessor options, this is unnecessary. * static UNNEEDED void add_to_counter(int add) * { * counter += add; * } */ #define UNNEEDED __attribute__((unused)) #endif #ifndef NEEDED #if HAVE_ATTRIBUTE_USED /** * NEEDED - a variable/function is needed * * This suppresses warnings about unused variables or functions, but tells * the compiler that it must exist even if it (seems) unused. * * Example: * // Even if this is unused, these are vital for debugging. 
* static NEEDED int counter; * static NEEDED void dump_counter(void) * { * printf("Counter is %i\n", counter); * } */ #define NEEDED __attribute__((used)) #else /* Before used, unused functions and vars were always emitted. */ #define NEEDED __attribute__((unused)) #endif #endif #ifndef UNUSED /** * UNUSED - a parameter is unused * * Some compilers (eg. gcc with -W or -Wunused) warn about unused * function parameters. This suppresses such warnings and indicates * to the reader that it's deliberate. * * Example: * // This is used as a callback, so needs to have this prototype. * static int some_callback(void *unused UNUSED) * { * return 0; * } */ #define UNUSED __attribute__((unused)) #endif #else #ifndef UNNEEDED #define UNNEEDED #endif #ifndef NEEDED #define NEEDED #endif #ifndef UNUSED #define UNUSED #endif #endif #ifndef IS_COMPILE_CONSTANT #if HAVE_BUILTIN_CONSTANT_P /** * IS_COMPILE_CONSTANT - does the compiler know the value of this expression? * @expr: the expression to evaluate * * When an expression manipulation is complicated, it is usually better to * implement it in a function. However, if the expression being manipulated is * known at compile time, it is better to have the compiler see the entire * expression so it can simply substitute the result. * * This can be done using the IS_COMPILE_CONSTANT() macro. * * Example: * enum greek { ALPHA, BETA, GAMMA, DELTA, EPSILON }; * * // Out-of-line version. * const char *greek_name(enum greek greek); * * // Inline version. * static inline const char *_greek_name(enum greek greek) * { * switch (greek) { * case ALPHA: return "alpha"; * case BETA: return "beta"; * case GAMMA: return "gamma"; * case DELTA: return "delta"; * case EPSILON: return "epsilon"; * default: return "**INVALID**"; * } * } * * // Use inline if compiler knows answer. Otherwise call function * // to avoid copies of the same code everywhere. * #define greek_name(g) \ * (IS_COMPILE_CONSTANT(greek) ? _greek_name(g) : greek_name(g)) */ #define IS_COMPILE_CONSTANT(expr) __builtin_constant_p(expr) #else /* If we don't know, assume it's not. */ #define IS_COMPILE_CONSTANT(expr) 0 #endif #endif #ifndef WARN_UNUSED_RESULT #if HAVE_WARN_UNUSED_RESULT /** * WARN_UNUSED_RESULT - warn if a function return value is unused. * * Used to mark a function where it is extremely unlikely that the caller * can ignore the result, eg realloc(). * * Example: * // buf param may be freed by this; need return value! * static char *WARN_UNUSED_RESULT enlarge(char *buf, unsigned *size) * { * return realloc(buf, (*size) *= 2); * } */ #define WARN_UNUSED_RESULT __attribute__((warn_unused_result)) #else #define WARN_UNUSED_RESULT #endif #endif #endif /* CCAN_COMPILER_H */ bfgminer-bfgminer-3.10.0/ccan/compiler/test/000077500000000000000000000000001226556647300206705ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/compiler/test/compile_fail-printf.c000066400000000000000000000012661226556647300247640ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) any * later version. See LICENSE for more details. */ #include static void PRINTF_FMT(2,3) my_printf(int x, const char *fmt, ...) 
{ } int main(int argc, char *argv[]) { unsigned int i = 0; my_printf(1, "Not a pointer " #ifdef FAIL "%p", #if !HAVE_ATTRIBUTE_PRINTF #error "Unfortunately we don't fail if !HAVE_ATTRIBUTE_PRINTF." #endif #else "%i", #endif i); return 0; } bfgminer-bfgminer-3.10.0/ccan/compiler/test/run-is_compile_constant.c000066400000000000000000000012121226556647300256660ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) any * later version. See LICENSE for more details. */ #include #include int main(int argc, char *argv[]) { plan_tests(2); ok1(!IS_COMPILE_CONSTANT(argc)); #if HAVE_BUILTIN_CONSTANT_P ok1(IS_COMPILE_CONSTANT(7)); #else pass("If !HAVE_BUILTIN_CONSTANT_P, IS_COMPILE_CONSTANT always false"); #endif return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/000077500000000000000000000000001226556647300167015ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/opt/LICENSE000066400000000000000000000431031226556647300177070ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. 
If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. bfgminer-bfgminer-3.10.0/ccan/opt/_info000066400000000000000000000037161226556647300177250ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include "config.h" /** * opt - simple command line parsing * * Simple but powerful command line parsing. * * Example: * #include * #include * #include * * static bool someflag; * static int verbose; * static char *somestring; * * static struct opt_table opts[] = { * OPT_WITHOUT_ARG("--verbose|-v", opt_inc_intval, &verbose, * "Verbose mode (can be specified more than once)"), * OPT_WITHOUT_ARG("--someflag", opt_set_bool, &someflag, * "Set someflag"), * OPT_WITH_ARG("--somefile=", opt_set_charp, opt_show_charp, * &somestring, "Set somefile to "), * OPT_WITHOUT_ARG("--usage|--help|-h", opt_usage_and_exit, * "args...\nA silly test program.", * "Print this message."), * OPT_ENDTABLE * }; * * int main(int argc, char *argv[]) * { * int i; * * opt_register_table(opts, NULL); * // For fun, register an extra one. 
* opt_register_noarg("--no-someflag", opt_set_invbool, &someflag, * "Unset someflag"); * if (!opt_parse(&argc, argv, opt_log_stderr)) * exit(1); * * printf("someflag = %i, verbose = %i, somestring = %s\n", * someflag, verbose, somestring); * printf("%u args left over:", argc - 1); * for (i = 1; i < argc; i++) * printf(" %s", argv[i]); * printf("\n"); * return 0; * } * * License: GPL (2 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { printf("ccan/typesafe_cb\n"); printf("ccan/compiler\n"); return 0; } return 1; } bfgminer-bfgminer-3.10.0/ccan/opt/helpers.c000066400000000000000000000100771226556647300205140ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * Copyright 2011 Con Kolivas * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include #include #include #include "private.h" /* Upper bound to sprintf this simple type? Each 3 bits < 1 digit. */ #define CHAR_SIZE(type) (((sizeof(type)*CHAR_BIT + 2) / 3) + 1) /* FIXME: asprintf module? */ static char *arg_bad(const char *fmt, const char *arg) { char *str = malloc(strlen(fmt) + strlen(arg)); sprintf(str, fmt, arg); return str; } char *opt_set_bool(bool *b) { *b = true; return NULL; } char *opt_set_invbool(bool *b) { *b = false; return NULL; } char *opt_set_bool_arg(const char *arg, bool *b) { if (!strcasecmp(arg, "yes") || !strcasecmp(arg, "true")) return opt_set_bool(b); if (!strcasecmp(arg, "no") || !strcasecmp(arg, "false")) return opt_set_invbool(b); return opt_invalid_argument(arg); } char *opt_set_invbool_arg(const char *arg, bool *b) { char *err = opt_set_bool_arg(arg, b); if (!err) *b = !*b; return err; } /* Set a char *. */ char *opt_set_charp(const char *arg, char **p) { *p = (char *)arg; return NULL; } /* Set an integer value, various forms. Sets to 1 on arg == NULL. */ char *opt_set_intval(const char *arg, int *i) { long l; char *err = opt_set_longval(arg, &l); if (err) return err; *i = l; /* Beware truncation... */ if (*i != l) return arg_bad("value '%s' does not fit into an integer", arg); return err; } char *opt_set_floatval(const char *arg, float *f) { char *endp; errno = 0; *f = strtof(arg, &endp); if (*endp || !arg[0]) return arg_bad("'%s' is not a number", arg); if (errno) return arg_bad("'%s' is out of range", arg); return NULL; } char *opt_set_uintval(const char *arg, unsigned int *ui) { int i; char *err = opt_set_intval(arg, &i); if (err) return err; if (i < 0) return arg_bad("'%s' is negative", arg); *ui = i; return NULL; } char *opt_set_longval(const char *arg, long *l) { char *endp; /* This is how the manpage says to do it. Yech. */ errno = 0; *l = strtol(arg, &endp, 0); if (*endp || !arg[0]) return arg_bad("'%s' is not a number", arg); if (errno) return arg_bad("'%s' is out of range", arg); return NULL; } char *opt_set_ulongval(const char *arg, unsigned long *ul) { long int l; char *err; err = opt_set_longval(arg, &l); if (err) return err; *ul = l; if (l < 0) return arg_bad("'%s' is negative", arg); return NULL; } char *opt_inc_intval(int *i) { (*i)++; return NULL; } /* Display version string. 
*/ char *opt_version_and_exit(const char *version) { printf("%s\n", version); fflush(stdout); exit(0); } char *opt_usage_and_exit(const char *extra) { printf("%s", opt_usage(opt_argv0, extra)); fflush(stdout); exit(0); } void opt_show_bool(char buf[OPT_SHOW_LEN], const bool *b) { strncpy(buf, *b ? "true" : "false", OPT_SHOW_LEN); } void opt_show_invbool(char buf[OPT_SHOW_LEN], const bool *b) { strncpy(buf, *b ? "false" : "true", OPT_SHOW_LEN); } void opt_show_charp(char buf[OPT_SHOW_LEN], char *const *p) { size_t len = strlen(*p); buf[0] = '"'; if (len > OPT_SHOW_LEN - 2) len = OPT_SHOW_LEN - 2; strncpy(buf+1, *p, len); buf[1+len] = '"'; if (len < OPT_SHOW_LEN - 2) buf[2+len] = '\0'; } /* Set an integer value, various forms. Sets to 1 on arg == NULL. */ void opt_show_intval(char buf[OPT_SHOW_LEN], const int *i) { snprintf(buf, OPT_SHOW_LEN, "%i", *i); } void opt_show_floatval(char buf[OPT_SHOW_LEN], const float *f) { snprintf(buf, OPT_SHOW_LEN, "%.1f", *f); } void opt_show_uintval(char buf[OPT_SHOW_LEN], const unsigned int *ui) { snprintf(buf, OPT_SHOW_LEN, "%u", *ui); } void opt_show_longval(char buf[OPT_SHOW_LEN], const long *l) { snprintf(buf, OPT_SHOW_LEN, "%li", *l); } void opt_show_ulongval(char buf[OPT_SHOW_LEN], const unsigned long *ul) { snprintf(buf, OPT_SHOW_LEN, "%lu", *ul); } bfgminer-bfgminer-3.10.0/ccan/opt/opt.c000066400000000000000000000132141226556647300176500ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * Copyright 2011 Ycros * Copyright 2011 Con Kolivas * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include #include #include #ifndef WIN32 #include #else #include #define errx(status, fmt, ...) { \ fprintf(stderr, fmt, __VA_ARGS__); \ fprintf(stderr, "\n"); \ exit(status); } #endif #include #include #include #include "private.h" struct opt_table *opt_table; unsigned int opt_count, opt_num_short, opt_num_short_arg, opt_num_long; const char *opt_argv0; /* Returns string after first '-'. 
*/ static const char *first_name(const char *names, unsigned *len) { *len = strcspn(names + 1, "|= "); return names + 1; } static const char *next_name(const char *names, unsigned *len) { names += *len; if (names[0] == ' ' || names[0] == '=' || names[0] == '\0') return NULL; return first_name(names + 1, len); } static const char *first_opt(unsigned *i, unsigned *len) { for (*i = 0; *i < opt_count; (*i)++) { if (opt_table[*i].type == OPT_SUBTABLE) continue; return first_name(opt_table[*i].names, len); } return NULL; } static const char *next_opt(const char *p, unsigned *i, unsigned *len) { for (; *i < opt_count; (*i)++) { if (opt_table[*i].type == OPT_SUBTABLE) continue; if (!p) return first_name(opt_table[*i].names, len); p = next_name(p, len); if (p) return p; } return NULL; } const char *first_lopt(unsigned *i, unsigned *len) { const char *p; for (p = first_opt(i, len); p; p = next_opt(p, i, len)) { if (p[0] == '-') { /* Skip leading "-" */ (*len)--; p++; break; } } return p; } const char *next_lopt(const char *p, unsigned *i, unsigned *len) { for (p = next_opt(p, i, len); p; p = next_opt(p, i, len)) { if (p[0] == '-') { /* Skip leading "-" */ (*len)--; p++; break; } } return p; } const char *first_sopt(unsigned *i) { const char *p; unsigned int len = 0 /* GCC bogus warning */; for (p = first_opt(i, &len); p; p = next_opt(p, i, &len)) { if (p[0] != '-') break; } return p; } const char *next_sopt(const char *p, unsigned *i) { unsigned int len = 1; for (p = next_opt(p, i, &len); p; p = next_opt(p, i, &len)) { if (p[0] != '-') break; } return p; } static void check_opt(const struct opt_table *entry) { const char *p; unsigned len; if (entry->type != OPT_HASARG && entry->type != OPT_NOARG) errx(1, "Option %s: unknown entry type %u", entry->names, entry->type); if (!entry->desc) errx(1, "Option %s: description cannot be NULL", entry->names); if (entry->names[0] != '-') errx(1, "Option %s: does not begin with '-'", entry->names); for (p = first_name(entry->names, &len); p; p = next_name(p, &len)) { if (*p == '-') { if (len == 1) errx(1, "Option %s: invalid long option '--'", entry->names); opt_num_long++; } else { if (len != 1) errx(1, "Option %s: invalid short option" " '%.*s'", entry->names, len+1, p-1); opt_num_short++; if (entry->type == OPT_HASARG) opt_num_short_arg++; } /* Don't document args unless there are some. */ if (entry->type == OPT_NOARG) { if (p[len] == ' ' || p[len] == '=') errx(1, "Option %s: does not take arguments" " '%s'", entry->names, p+len+1); } } } static void add_opt(const struct opt_table *entry) { opt_table = realloc(opt_table, sizeof(opt_table[0]) * (opt_count+1)); opt_table[opt_count++] = *entry; } void _opt_register(const char *names, enum opt_type type, char *(*cb)(void *arg), char *(*cb_arg)(const char *optarg, void *arg), void (*show)(char buf[OPT_SHOW_LEN], const void *arg), const void *arg, const char *desc) { struct opt_table opt; opt.names = names; opt.type = type; opt.cb = cb; opt.cb_arg = cb_arg; opt.show = show; opt.u.carg = arg; opt.desc = desc; check_opt(&opt); add_opt(&opt); } void opt_register_table(const struct opt_table entry[], const char *desc) { unsigned int i, start = opt_count; if (desc) { struct opt_table heading = OPT_SUBTABLE(NULL, desc); add_opt(&heading); } for (i = 0; entry[i].type != OPT_END; i++) { if (entry[i].type == OPT_SUBTABLE) opt_register_table(subtable_of(&entry[i]), entry[i].desc); else { check_opt(&entry[i]); add_opt(&entry[i]); } } /* We store the table length in arg ptr. 
*/ if (desc) opt_table[start].u.tlen = (opt_count - start); } /* Parse your arguments. */ bool opt_parse(int *argc, char *argv[], void (*errlog)(const char *fmt, ...)) { int ret; unsigned offset = 0; #ifdef WIN32 char *original_argv0 = argv[0]; argv[0] = (char*)basename(argv[0]); #endif /* This helps opt_usage. */ opt_argv0 = argv[0]; while ((ret = parse_one(argc, argv, &offset, errlog)) == 1); #ifdef WIN32 argv[0] = original_argv0; #endif /* parse_one returns 0 on finish, -1 on error */ return (ret == 0); } void opt_free_table(void) { free(opt_table); opt_table=0; } void opt_log_stderr(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); } void opt_log_stderr_exit(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); va_end(ap); exit(1); } char *opt_invalid_argument(const char *arg) { char *str = malloc(sizeof("Invalid argument '%s'") + strlen(arg)); sprintf(str, "Invalid argument '%s'", arg); return str; } bfgminer-bfgminer-3.10.0/ccan/opt/opt.h000066400000000000000000000307111226556647300176560ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * Copyright 2011 Con Kolivas * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #ifndef CCAN_OPT_H #define CCAN_OPT_H #include #include #include #include struct opt_table; /** * OPT_WITHOUT_ARG() - macro for initializing an opt_table entry (without arg) * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". * @cb: the callback when the option is found. * @arg: the argument to hand to @cb. * @desc: the description for opt_usage(), or opt_hidden. * * This is a typesafe wrapper for initializing a struct opt_table. The callback * of type "char *cb(type *)", "char *cb(const type *)" or "char *cb(void *)", * where "type" is the type of the @arg argument. * * If the @cb returns non-NULL, opt_parse() will stop parsing, use the * returned string to form an error message for errlog(), free() the * string and return false. * * Any number of equivalent short or long options can be listed in @names, * separated by '|'. Short options are a single hyphen followed by a single * character, long options are two hyphens followed by one or more characters. * * See Also: * OPT_WITH_ARG() */ #define OPT_WITHOUT_ARG(names, cb, arg, desc) \ { (names), OPT_CB_NOARG((cb), (arg)), { (arg) }, (desc) } /** * OPT_WITH_ARG() - macro for initializing long and short option (with arg) * @names: the option names eg. "--foo=", "-f" or "-f|--foo ". * @cb: the callback when the option is found (along with ). * @show: the callback to print the value in get_usage (or NULL) * @arg: the argument to hand to @cb and @show * @desc: the description for opt_usage(), or opt_hidden. * * This is a typesafe wrapper for initializing a struct opt_table. The callback * is of type "char *cb(const char *, type *)", * "char *cb(const char *, const type *)" or "char *cb(const char *, void *)", * where "type" is the type of the @arg argument. The first argument to the * @cb is the argument found on the commandline. * * Similarly, if @show is not NULL, it should be of type "void *show(char *, * const type *)". 
It should write up to OPT_SHOW_LEN bytes into the first * argument; unless it uses the entire OPT_SHOW_LEN bytes it should * nul-terminate that buffer. * * Any number of equivalent short or long options can be listed in @names, * separated by '|'. Short options are a single hyphen followed by a single * character, long options are two hyphens followed by one or more characters. * A space or equals in @names is ignored for parsing, and only used * for printing the usage. * * If the @cb returns non-NULL, opt_parse() will stop parsing, use the * returned string to form an error message for errlog(), free() the * string and return false. * * See Also: * OPT_WITHOUT_ARG() */ #define OPT_WITH_ARG(name, cb, show, arg, desc) \ { (name), OPT_CB_ARG((cb), (show), (arg)), { (arg) }, (desc) } /** * OPT_SUBTABLE() - macro for including another table inside a table. * @table: the table to include in this table. * @desc: description of this subtable (for opt_usage()) or NULL. */ #define OPT_SUBTABLE(table, desc) \ { (const char *)(table), OPT_SUBTABLE, \ sizeof(_check_is_entry(table)) ? NULL : NULL, NULL, NULL, \ { NULL }, (desc) } /** * OPT_ENDTABLE - macro to create final entry in table. * * This must be the final element in the opt_table array. */ #define OPT_ENDTABLE { NULL, OPT_END, NULL, NULL, NULL, { NULL }, NULL } /** * opt_register_table - register a table of options * @table: the table of options * @desc: description of this subtable (for opt_usage()) or NULL. * * The table must be terminated by OPT_ENDTABLE. * * Example: * static int verbose = 0; * static struct opt_table opts[] = { * OPT_WITHOUT_ARG("--verbose", opt_inc_intval, &verbose, * "Verbose mode (can be specified more than once)"), * OPT_WITHOUT_ARG("-v", opt_inc_intval, &verbose, * "Verbose mode (can be specified more than once)"), * OPT_WITHOUT_ARG("--usage", opt_usage_and_exit, * "args...\nA silly test program.", * "Print this message."), * OPT_ENDTABLE * }; * * ... * opt_register_table(opts, NULL); */ void opt_register_table(const struct opt_table *table, const char *desc); /** * opt_register_noarg - register an option with no arguments * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". * @cb: the callback when the option is found. * @arg: the argument to hand to @cb. * @desc: the verbose description of the option (for opt_usage()), or NULL. * * This is used for registering a single commandline option which takes * no argument. * * The callback is of type "char *cb(type *)", "char *cb(const type *)" * or "char *cb(void *)", where "type" is the type of the @arg * argument. * * If the @cb returns non-NULL, opt_parse() will stop parsing, use the * returned string to form an error message for errlog(), free() the * string and return false. */ #define opt_register_noarg(names, cb, arg, desc) \ _opt_register((names), OPT_CB_NOARG((cb), (arg)), (arg), (desc)) /** * opt_register_arg - register an option with an arguments * @names: the names of the option eg. "--foo", "-f" or "--foo|-f|--foobar". * @cb: the callback when the option is found. * @show: the callback to print the value in get_usage (or NULL) * @arg: the argument to hand to @cb. * @desc: the verbose description of the option (for opt_usage()), or NULL. * * This is used for registering a single commandline option which takes * an argument. * * The callback is of type "char *cb(const char *, type *)", * "char *cb(const char *, const type *)" or "char *cb(const char *, void *)", * where "type" is the type of the @arg argument. 
The first argument to the * @cb is the argument found on the commandline. * * If the @cb returns non-NULL, opt_parse() will stop parsing, use the * returned string to form an error message for errlog(), free() the * string and return false. * * Example: * static char *explode(const char *optarg, void *unused) * { * errx(1, "BOOM! %s", optarg); * } * ... * opt_register_arg("--explode|--boom", explode, NULL, NULL, opt_hidden); */ #define opt_register_arg(names, cb, show, arg, desc) \ _opt_register((names), OPT_CB_ARG((cb), (show), (arg)), (arg), (desc)) /** * opt_parse - parse arguments. * @argc: pointer to argc * @argv: argv array. * @errlog: the function to print errors * * This iterates through the command line and calls callbacks registered with * opt_register_table()/opt_register_arg()/opt_register_noarg(). If there * are unknown options, missing arguments or a callback returns an error, then * an error message is printed and false is returned. * * On success, argc and argv are adjusted so only the non-option elements * remain, and true is returned. * * Example: * if (!opt_parse(&argc, argv, opt_log_stderr)) { * printf("You screwed up, aborting!\n"); * exit(1); * } * * See Also: * opt_log_stderr, opt_log_stderr_exit */ bool opt_parse(int *argc, char *argv[], void (*errlog)(const char *fmt, ...)); /** * opt_free_table - free the table. * * This frees the internal memory. Call this as the last * opt function. */ void opt_free_table(void); /** * opt_log_stderr - print message to stderr. * @fmt: printf-style format. * * This is a helper for opt_parse, to print errors to stderr. * * See Also: * opt_log_stderr_exit */ void opt_log_stderr(const char *fmt, ...); /** * opt_log_stderr_exit - print message to stderr, then exit(1) * @fmt: printf-style format. * * Just like opt_log_stderr, only then does exit(1). This means that * when handed to opt_parse, opt_parse will never return false. * * Example: * // This never returns false; just exits if there's an error. * opt_parse(&argc, argv, opt_log_stderr_exit); */ void opt_log_stderr_exit(const char *fmt, ...); /** * opt_invalid_argument - helper to allocate an "Invalid argument '%s'" string * @arg: the argument which was invalid. * * This is a helper for callbacks to return a simple error string. */ char *opt_invalid_argument(const char *arg); /** * opt_usage - create usage message * @argv0: the program name * @extra: extra details to print after the initial command, or NULL. * * Creates a usage message, with the program name, arguments, some extra details * and a table of all the options with their descriptions. If an option has * description opt_hidden, it is not shown here. * * If "extra" is NULL, then the extra information is taken from any * registered option which calls opt_usage_and_exit(). This avoids duplicating * that string in the common case. * * The result should be passed to free(). */ char *opt_usage(const char *argv0, const char *extra); /** * opt_hidden - string for undocumented options. * * This can be used as the desc parameter if you want an option not to be * shown by opt_usage(). */ extern const char opt_hidden[]; /* Maximum length of arg to show in opt_usage */ #define OPT_SHOW_LEN 80 /* Standard helpers. You can write your own: */ /* Sets the @b to true. */ char *opt_set_bool(bool *b); /* Sets @b based on arg: (yes/no/true/false).
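Any other string makes the callback hand back an "Invalid argument '...'" message, which stops parsing.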
*/ char *opt_set_bool_arg(const char *arg, bool *b); void opt_show_bool(char buf[OPT_SHOW_LEN], const bool *b); /* The inverse */ char *opt_set_invbool(bool *b); void opt_show_invbool(char buf[OPT_SHOW_LEN], const bool *b); /* Sets @b based on !arg: (yes/no/true/false). */ char *opt_set_invbool_arg(const char *arg, bool *b); /* Set a char *. */ char *opt_set_charp(const char *arg, char **p); void opt_show_charp(char buf[OPT_SHOW_LEN], char *const *p); /* Set an integer value, various forms. Sets to 1 on arg == NULL. */ char *opt_set_intval(const char *arg, int *i); void opt_show_intval(char buf[OPT_SHOW_LEN], const int *i); char *opt_set_floatval(const char *arg, float *f); void opt_show_floatval(char buf[OPT_SHOW_LEN], const float *f); char *opt_set_uintval(const char *arg, unsigned int *ui); void opt_show_uintval(char buf[OPT_SHOW_LEN], const unsigned int *ui); char *opt_set_longval(const char *arg, long *l); void opt_show_longval(char buf[OPT_SHOW_LEN], const long *l); char *opt_set_ulongval(const char *arg, unsigned long *ul); void opt_show_ulongval(char buf[OPT_SHOW_LEN], const unsigned long *ul); /* Increment. */ char *opt_inc_intval(int *i); /* Display version string to stdout, exit(0). */ char *opt_version_and_exit(const char *version); /* Display usage string to stdout, exit(0). */ char *opt_usage_and_exit(const char *extra); /* Below here are private declarations. */ /* You can use this directly to build tables, but the macros will ensure * consistency and type safety. */ enum opt_type { OPT_NOARG = 1, /* -f|--foo */ OPT_HASARG = 2, /* -f arg|--foo=arg|--foo arg */ OPT_SUBTABLE = 4, /* Actually, longopt points to a subtable... */ OPT_END = 8, /* End of the table. */ }; struct opt_table { const char *names; /* pipe-separated names, --longopt or -s */ enum opt_type type; char *(*cb)(void *arg); /* OPT_NOARG */ char *(*cb_arg)(const char *optarg, void *arg); /* OPT_HASARG */ void (*show)(char buf[OPT_SHOW_LEN], const void *arg); union { const void *carg; void *arg; size_t tlen; } u; const char *desc; }; /* Resolves to the four parameters for non-arg callbacks. */ #define OPT_CB_NOARG(cb, arg) \ OPT_NOARG, \ typesafe_cb_cast3(char *(*)(void *), \ char *(*)(typeof(*(arg))*), \ char *(*)(const typeof(*(arg))*), \ char *(*)(const void *), (cb)), \ NULL, NULL /* Resolves to the four parameters for arg callbacks. */ #define OPT_CB_ARG(cb, show, arg) \ OPT_HASARG, NULL, \ typesafe_cb_cast3(char *(*)(const char *,void *), \ char *(*)(const char *, typeof(*(arg))*), \ char *(*)(const char *, const typeof(*(arg))*), \ char *(*)(const char *, const void *), \ (cb)), \ typesafe_cb_cast(void (*)(char buf[], const void *), \ void (*)(char buf[], const typeof(*(arg))*), (show)) /* Non-typesafe register function. */ void _opt_register(const char *names, enum opt_type type, char *(*cb)(void *arg), char *(*cb_arg)(const char *optarg, void *arg), void (*show)(char buf[OPT_SHOW_LEN], const void *arg), const void *arg, const char *desc); /* We use this to get typechecking for OPT_SUBTABLE */ static inline int _check_is_entry(struct opt_table *e UNUSED) { return 0; } #endif /* CCAN_OPT_H */ bfgminer-bfgminer-3.10.0/ccan/opt/parse.c000066400000000000000000000070271226556647300201650ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. 
See LICENSE for more details. */ /* Actual code to parse commandline. */ #include #include #include #include #include "private.h" /* glibc does this as: /tmp/opt-example: invalid option -- 'x' /tmp/opt-example: unrecognized option '--long' /tmp/opt-example: option '--someflag' doesn't allow an argument /tmp/opt-example: option '--s' is ambiguous /tmp/opt-example: option requires an argument -- 's' */ static int parse_err(void (*errlog)(const char *fmt, ...), const char *argv0, const char *arg, unsigned len, const char *problem) { errlog("%s: %.*s: %s", argv0, len, arg, problem); return -1; } static void consume_option(int *argc, char *argv[], unsigned optnum) { memmove(&argv[optnum], &argv[optnum+1], sizeof(argv[optnum]) * (*argc-optnum)); (*argc)--; } /* Returns 1 if argument consumed, 0 if all done, -1 on error. */ int parse_one(int *argc, char *argv[], unsigned *offset, void (*errlog)(const char *fmt, ...)) { unsigned i, arg, len; const char *o, *optarg = NULL; char *problem; if (getenv("POSIXLY_CORRECT")) { /* Don't find options after non-options. */ arg = 1; } else { for (arg = 1; argv[arg]; arg++) { if (argv[arg][0] == '-') break; } } if (!argv[arg] || argv[arg][0] != '-') return 0; /* Special arg terminator option. */ if (strcmp(argv[arg], "--") == 0) { consume_option(argc, argv, arg); return 0; } /* Long options start with -- */ if (argv[arg][1] == '-') { assert(*offset == 0); for (o = first_lopt(&i, &len); o; o = next_lopt(o, &i, &len)) { if (strncmp(argv[arg] + 2, o, len) != 0) continue; if (argv[arg][2 + len] == '=') optarg = argv[arg] + 2 + len + 1; else if (argv[arg][2 + len] != '\0') continue; break; } if (!o) return parse_err(errlog, argv[0], argv[arg], strlen(argv[arg]), "unrecognized option"); /* For error messages, we include the leading '--' */ o -= 2; len += 2; } else { /* offset allows us to handle -abc */ for (o = first_sopt(&i); o; o = next_sopt(o, &i)) { if (argv[arg][*offset + 1] != *o) continue; (*offset)++; break; } if (!o) return parse_err(errlog, argv[0], argv[arg], strlen(argv[arg]), "unrecognized option"); /* For error messages, we include the leading '-' */ o--; len = 2; } if (opt_table[i].type == OPT_NOARG) { if (optarg) return parse_err(errlog, argv[0], o, len, "doesn't allow an argument"); problem = opt_table[i].cb(opt_table[i].u.arg); } else { if (!optarg) { /* Swallow any short options as optarg, eg -afile */ if (*offset && argv[arg][*offset + 1]) { optarg = argv[arg] + *offset + 1; *offset = 0; } else optarg = argv[arg+1]; } if (!optarg) return parse_err(errlog, argv[0], o, len, "requires an argument"); problem = opt_table[i].cb_arg(optarg, opt_table[i].u.arg); } if (problem) { parse_err(errlog, argv[0], o, len, problem); free(problem); return -1; } /* If no more letters in that short opt, reset offset. */ if (*offset && !argv[arg][*offset + 1]) *offset = 0; /* All finished with that option? */ if (*offset == 0) { consume_option(argc, argv, arg); if (optarg && optarg == argv[arg]) consume_option(argc, argv, arg); } return 1; } bfgminer-bfgminer-3.10.0/ccan/opt/private.h000066400000000000000000000016561226556647300205340ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. 
*/ #ifndef CCAN_OPT_PRIVATE_H #define CCAN_OPT_PRIVATE_H extern struct opt_table *opt_table; extern unsigned int opt_count, opt_num_short, opt_num_short_arg, opt_num_long; extern const char *opt_argv0; #define subtable_of(entry) ((struct opt_table *)((entry)->names)) const char *first_sopt(unsigned *i); const char *next_sopt(const char *names, unsigned *i); const char *first_lopt(unsigned *i, unsigned *len); const char *next_lopt(const char *p, unsigned *i, unsigned *len); int parse_one(int *argc, char *argv[], unsigned *offset, void (*errlog)(const char *fmt, ...)); #endif /* CCAN_OPT_PRIVATE_H */ bfgminer-bfgminer-3.10.0/ccan/opt/test/000077500000000000000000000000001226556647300176605ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/opt/test/compile_ok-const-arg.c000066400000000000000000000011641226556647300240420ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include #include #include int main(int argc, char *argv[]) { opt_register_noarg("-v", opt_version_and_exit, (const char *)"1.2.3", (const char *)"Print version"); return 0; } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-checkopt.c000066400000000000000000000065651226556647300224420ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include "config.h" #include #include #include #include #include #include #include "utils.h" /* We don't actually want it to exit... */ static jmp_buf exited; #define errx save_and_jump static void save_and_jump(int ecode, const char *fmt, ...); #include #include #include #include static char *output = NULL; static int saved_vprintf(const char *fmt, va_list ap) { char *p; int ret = vasprintf(&p, fmt, ap); if (output) { output = realloc(output, strlen(output) + strlen(p) + 1); strcat(output, p); free(p); } else output = p; return ret; } static void save_and_jump(int ecode, const char *fmt, ...) { va_list ap; va_start(ap, fmt); saved_vprintf(fmt, ap); va_end(ap); longjmp(exited, ecode + 1); } static void reset(void) { free(output); output = NULL; free(opt_table); opt_table = NULL; opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; } int main(int argc, char *argv[]) { int exitval; plan_tests(14); exitval = setjmp(exited); if (exitval == 0) { /* Bad type. */ _opt_register("-a", OPT_SUBTABLE, (void *)opt_version_and_exit, NULL, NULL, "1.2.3", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option -a: unknown entry type")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* NULL description. */ opt_register_noarg("-a", test_noarg, "", NULL); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option -a: description cannot be NULL")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* Bad option name. 
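The name "a" lacks the leading '-', so _opt_register() is expected to abort via the stubbed errx(); the else-branch below checks for the "does not begin with '-'" message.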
*/ opt_register_noarg("a", test_noarg, "", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option a: does not begin with '-'")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* Bad option name. */ opt_register_noarg("--", test_noarg, "", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option --: invalid long option '--'")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* Bad option name. */ opt_register_noarg("--a|-aaa", test_noarg, "", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option --a|-aaa: invalid short option '-aaa'")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* Documentation for non-optios. */ opt_register_noarg("--a foo", test_noarg, "", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option --a foo: does not take arguments 'foo'")); } reset(); exitval = setjmp(exited); if (exitval == 0) { /* Documentation for non-optios. */ opt_register_noarg("--a=foo", test_noarg, "", ""); fail("_opt_register returned?"); } else { ok1(exitval - 1 == 1); ok1(strstr(output, "Option --a=foo: does not take arguments 'foo'")); } return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-correct-reporting.c000066400000000000000000000033131226556647300242760ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ /* Make sure when multiple equivalent options, correct one is used for errors */ #include #include #include #include #include #include #include "utils.h" int main(int argc, char *argv[]) { plan_tests(12); /* --aaa without args. */ opt_register_arg("-a|--aaa", test_arg, NULL, "aaa", ""); ok1(!parse_args(&argc, &argv, "--aaa", NULL)); ok1(strstr(err_output, ": --aaa: requires an argument")); free(err_output); err_output = NULL; ok1(!parse_args(&argc, &argv, "-a", NULL)); ok1(strstr(err_output, ": -a: requires an argument")); free(err_output); err_output = NULL; /* Multiple */ opt_register_arg("--bbb|-b|-c|--ccc", test_arg, NULL, "aaa", ""); ok1(!parse_args(&argc, &argv, "--bbb", NULL)); ok1(strstr(err_output, ": --bbb: requires an argument")); free(err_output); err_output = NULL; ok1(!parse_args(&argc, &argv, "-b", NULL)); ok1(strstr(err_output, ": -b: requires an argument")); free(err_output); err_output = NULL; ok1(!parse_args(&argc, &argv, "-c", NULL)); ok1(strstr(err_output, ": -c: requires an argument")); free(err_output); err_output = NULL; ok1(!parse_args(&argc, &argv, "--ccc", NULL)); ok1(strstr(err_output, ": --ccc: requires an argument")); free(err_output); err_output = NULL; /* parse_args allocates argv */ free(argv); return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-helpers.c000066400000000000000000000250651226556647300223000ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. 
*/ #include "config.h" #include #include #include #include #include #include "utils.h" /* We don't actually want it to exit... */ static jmp_buf exited; #define exit(status) longjmp(exited, (status) + 1) #define printf saved_printf static int saved_printf(const char *fmt, ...); #define fprintf saved_fprintf static int saved_fprintf(FILE *ignored, const char *fmt, ...); #define vfprintf(f, fmt, ap) saved_vprintf(fmt, ap) static int saved_vprintf(const char *fmt, va_list ap); #define malloc(size) saved_malloc(size) static void *saved_malloc(size_t size); #include #include #include #include static void reset_options(void) { free(opt_table); opt_table = NULL; opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; } static char *output = NULL; static int saved_vprintf(const char *fmt, va_list ap) { char *p; int ret = vasprintf(&p, fmt, ap); if (output) { output = realloc(output, strlen(output) + strlen(p) + 1); strcat(output, p); free(p); } else output = p; return ret; } static int saved_printf(const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = saved_vprintf(fmt, ap); va_end(ap); return ret; } static int saved_fprintf(FILE *ignored, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = saved_vprintf(fmt, ap); va_end(ap); return ret; } #undef malloc static void *last_allocation; static void *saved_malloc(size_t size) { return last_allocation = malloc(size); } /* Test helpers. */ int main(int argc, char *argv[]) { plan_tests(100); /* opt_set_bool */ { bool arg = false; reset_options(); opt_register_noarg("-a", opt_set_bool, &arg, ""); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(arg); opt_register_arg("-b", opt_set_bool_arg, NULL, &arg, ""); ok1(parse_args(&argc, &argv, "-b", "no", NULL)); ok1(!arg); ok1(parse_args(&argc, &argv, "-b", "yes", NULL)); ok1(arg); ok1(parse_args(&argc, &argv, "-b", "false", NULL)); ok1(!arg); ok1(parse_args(&argc, &argv, "-b", "true", NULL)); ok1(arg); ok1(!parse_args(&argc, &argv, "-b", "unknown", NULL)); ok1(arg); ok1(strstr(err_output, ": -b: Invalid argument 'unknown'")); } /* opt_set_invbool */ { bool arg = true; reset_options(); opt_register_noarg("-a", opt_set_invbool, &arg, ""); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(!arg); opt_register_arg("-b", opt_set_invbool_arg, NULL, &arg, ""); ok1(parse_args(&argc, &argv, "-b", "no", NULL)); ok1(arg); ok1(parse_args(&argc, &argv, "-b", "yes", NULL)); ok1(!arg); ok1(parse_args(&argc, &argv, "-b", "false", NULL)); ok1(arg); ok1(parse_args(&argc, &argv, "-b", "true", NULL)); ok1(!arg); ok1(!parse_args(&argc, &argv, "-b", "unknown", NULL)); ok1(!arg); ok1(strstr(err_output, ": -b: Invalid argument 'unknown'")); } /* opt_set_charp */ { char *arg = (char *)"wrong"; reset_options(); opt_register_arg("-a", opt_set_charp, NULL, &arg, "All"); ok1(parse_args(&argc, &argv, "-a", "string", NULL)); ok1(strcmp(arg, "string") == 0); } /* opt_set_intval */ { int arg = 1000; reset_options(); opt_register_arg("-a", opt_set_intval, NULL, &arg, "All"); ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); ok1(arg == 9999); ok1(parse_args(&argc, &argv, "-a", "-9999", NULL)); ok1(arg == -9999); ok1(parse_args(&argc, &argv, "-a", "0", NULL)); ok1(arg == 0); ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); if (sizeof(int) == 4) ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); else fail("Handle other int sizes"); } /* opt_set_uintval */ { unsigned int arg = 1000; reset_options(); opt_register_arg("-a", opt_set_uintval, NULL, &arg, "All"); ok1(parse_args(&argc, &argv, "-a", 
"9999", NULL)); ok1(arg == 9999); ok1(!parse_args(&argc, &argv, "-a", "-9999", NULL)); ok1(parse_args(&argc, &argv, "-a", "0", NULL)); ok1(arg == 0); ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); if (ULONG_MAX == UINT_MAX) { pass("Can't test overflow"); pass("Can't test error message"); } else { char buf[30]; sprintf(buf, "%lu", ULONG_MAX); ok1(!parse_args(&argc, &argv, "-a", buf, NULL)); ok1(strstr(err_output, ": -a: value '") && strstr(err_output, buf) && strstr(err_output, "' does not fit into an integer")); } } /* opt_set_longval */ { long int arg = 1000; reset_options(); opt_register_arg("-a", opt_set_longval, NULL, &arg, "All"); ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); ok1(arg == 9999); ok1(parse_args(&argc, &argv, "-a", "-9999", NULL)); ok1(arg == -9999); ok1(parse_args(&argc, &argv, "-a", "0", NULL)); ok1(arg == 0); ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); if (sizeof(long) == 4) ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); else if (sizeof(long)== 8) ok1(!parse_args(&argc, &argv, "-a", "18446744073709551616", NULL)); else fail("FIXME: Handle other long sizes"); } /* opt_set_ulongval */ { unsigned long int arg = 1000; reset_options(); opt_register_arg("-a", opt_set_ulongval, NULL, &arg, "All"); ok1(parse_args(&argc, &argv, "-a", "9999", NULL)); ok1(arg == 9999); ok1(!parse_args(&argc, &argv, "-a", "-9999", NULL)); ok1(parse_args(&argc, &argv, "-a", "0", NULL)); ok1(arg == 0); ok1(!parse_args(&argc, &argv, "-a", "100crap", NULL)); if (sizeof(long) == 4) ok1(!parse_args(&argc, &argv, "-a", "4294967296", NULL)); else if (sizeof(long)== 8) ok1(!parse_args(&argc, &argv, "-a", "18446744073709551616", NULL)); else fail("FIXME: Handle other long sizes"); } /* opt_inc_intval */ { int arg = 1000; reset_options(); opt_register_noarg("-a", opt_inc_intval, &arg, ""); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(arg == 1001); ok1(parse_args(&argc, &argv, "-a", "-a", NULL)); ok1(arg == 1003); ok1(parse_args(&argc, &argv, "-aa", NULL)); ok1(arg == 1005); } /* opt_show_version_and_exit. */ { int exitval; reset_options(); opt_register_noarg("-a", opt_version_and_exit, "1.2.3", ""); /* parse_args allocates argv */ free(argv); argc = 2; argv = malloc(sizeof(argv[0]) * 3); argv[0] = "thisprog"; argv[1] = "-a"; argv[2] = NULL; exitval = setjmp(exited); if (exitval == 0) { opt_parse(&argc, argv, save_err_output); fail("opt_show_version_and_exit returned?"); } else { ok1(exitval - 1 == 0); } ok1(strcmp(output, "1.2.3\n") == 0); free(output); free(argv); output = NULL; } /* opt_usage_and_exit. */ { int exitval; reset_options(); opt_register_noarg("-a", opt_usage_and_exit, "[args]", ""); argc = 2; argv = malloc(sizeof(argv[0]) * 3); argv[0] = "thisprog"; argv[1] = "-a"; argv[2] = NULL; exitval = setjmp(exited); if (exitval == 0) { opt_parse(&argc, argv, save_err_output); fail("opt_usage_and_exit returned?"); } else { ok1(exitval - 1 == 0); } ok1(strstr(output, "[args]")); ok1(strstr(output, argv[0])); ok1(strstr(output, "[-a]")); free(output); free(argv); /* It exits without freeing usage string. 
*/ free(last_allocation); output = NULL; } /* opt_show_bool */ { bool b; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; b = true; opt_show_bool(buf, &b); ok1(strcmp(buf, "true") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); b = false; opt_show_bool(buf, &b); ok1(strcmp(buf, "false") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_show_invbool */ { bool b; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; b = true; opt_show_invbool(buf, &b); ok1(strcmp(buf, "false") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); b = false; opt_show_invbool(buf, &b); ok1(strcmp(buf, "true") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_show_charp */ { char str[OPT_SHOW_LEN*2], *p; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; /* Short test. */ p = str; strcpy(p, "short"); opt_show_charp(buf, &p); ok1(strcmp(buf, "\"short\"") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); /* Truncate test. */ memset(p, 'x', OPT_SHOW_LEN*2); p[OPT_SHOW_LEN*2-1] = '\0'; opt_show_charp(buf, &p); ok1(buf[0] == '"'); ok1(buf[OPT_SHOW_LEN-1] == '"'); ok1(buf[OPT_SHOW_LEN] == '!'); ok1(strspn(buf+1, "x") == OPT_SHOW_LEN-2); } /* opt_show_intval */ { int i; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; i = -77; opt_show_intval(buf, &i); ok1(strcmp(buf, "-77") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); i = 77; opt_show_intval(buf, &i); ok1(strcmp(buf, "77") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_show_uintval */ { unsigned int ui; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; ui = 4294967295U; opt_show_uintval(buf, &ui); ok1(strcmp(buf, "4294967295") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_show_longval */ { long l; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; l = 1234567890L; opt_show_longval(buf, &l); ok1(strcmp(buf, "1234567890") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_show_ulongval */ { unsigned long ul; char buf[OPT_SHOW_LEN+2] = { 0 }; buf[OPT_SHOW_LEN] = '!'; ul = 4294967295UL; opt_show_ulongval(buf, &ul); ok1(strcmp(buf, "4294967295") == 0); ok1(buf[OPT_SHOW_LEN] == '!'); } /* opt_log_stderr. */ { reset_options(); opt_register_noarg("-a", opt_usage_and_exit, "[args]", ""); argc = 2; argv = malloc(sizeof(argv[0]) * 3); argv[0] = "thisprog"; argv[1] = "--garbage"; argv[2] = NULL; ok1(!opt_parse(&argc, argv, opt_log_stderr)); ok1(!strcmp(output, "thisprog: --garbage: unrecognized option\n")); free(output); free(argv); output = NULL; } /* opt_log_stderr_exit. */ { int exitval; reset_options(); opt_register_noarg("-a", opt_usage_and_exit, "[args]", ""); argc = 2; argv = malloc(sizeof(argv[0]) * 3); argv[0] = "thisprog"; argv[1] = "--garbage"; argv[2] = NULL; exitval = setjmp(exited); if (exitval == 0) { opt_parse(&argc, argv, opt_log_stderr_exit); fail("opt_log_stderr_exit returned?"); } else { ok1(exitval - 1 == 1); } free(argv); ok1(!strcmp(output, "thisprog: --garbage: unrecognized option\n")); free(output); output = NULL; } return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-iter.c000066400000000000000000000042671226556647300216020ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. 
*/ #include #include #include #include #include #include "utils.h" #include #include #include #include static void reset_options(void) { free(opt_table); opt_table = NULL; opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; } /* Test iterators. */ int main(int argc, char *argv[]) { unsigned j, i, len = 0; const char *p; plan_tests(37 * 2); for (j = 0; j < 2; j ++) { reset_options(); /* Giving subtable a title makes an extra entry! */ opt_register_table(subtables, j == 0 ? NULL : "subtable"); p = first_lopt(&i, &len); ok1(i == j + 0); ok1(len == 3); ok1(strncmp(p, "jjj", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 0); ok1(len == 3); ok1(strncmp(p, "lll", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 1); ok1(len == 3); ok1(strncmp(p, "mmm", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 5); ok1(len == 3); ok1(strncmp(p, "ddd", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 6); ok1(len == 3); ok1(strncmp(p, "eee", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 7); ok1(len == 3); ok1(strncmp(p, "ggg", len) == 0); p = next_lopt(p, &i, &len); ok1(i == j + 8); ok1(len == 3); ok1(strncmp(p, "hhh", len) == 0); p = next_lopt(p, &i, &len); ok1(!p); p = first_sopt(&i); ok1(i == j + 0); ok1(*p == 'j'); p = next_sopt(p, &i); ok1(i == j + 0); ok1(*p == 'l'); p = next_sopt(p, &i); ok1(i == j + 1); ok1(*p == 'm'); p = next_sopt(p, &i); ok1(i == j + 2); ok1(*p == 'a'); p = next_sopt(p, &i); ok1(i == j + 3); ok1(*p == 'b'); p = next_sopt(p, &i); ok1(i == j + 7); ok1(*p == 'g'); p = next_sopt(p, &i); ok1(i == j + 8); ok1(*p == 'h'); p = next_sopt(p, &i); ok1(!p); } return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-no-options.c000066400000000000000000000020501226556647300227300ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ /* Make sure we still work with no options registered */ #include #include #include #include #include #include #include "utils.h" int main(int argc, char *argv[]) { const char *myname = argv[0]; plan_tests(7); /* Simple short arg.*/ ok1(!parse_args(&argc, &argv, "-a", NULL)); /* Simple long arg.*/ ok1(!parse_args(&argc, &argv, "--aaa", NULL)); /* Extra arguments preserved. */ ok1(parse_args(&argc, &argv, "extra", "args", NULL)); ok1(argc == 3); ok1(argv[0] == myname); ok1(strcmp(argv[1], "extra") == 0); ok1(strcmp(argv[2], "args") == 0); /* parse_args allocates argv */ free(argv); return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run-usage.c000066400000000000000000000076751226556647300217510ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include #include #include #include "utils.h" #include #include #include #include static char *my_cb(void *p) { return NULL; } static void reset_options(void) { free(opt_table); opt_table = NULL; opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; } /* Test helpers. 
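This run exercises opt_usage(): it registers the shared subtables plus a few extra options and checks that names, descriptions, default values and hidden entries are rendered as expected.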
*/ int main(int argc, char *argv[]) { char *output; char *longname = strdup("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); char *shortname = strdup("shortname"); plan_tests(48); opt_register_table(subtables, NULL); opt_register_noarg("--kkk|-k", my_cb, NULL, "magic kkk option"); opt_register_noarg("-?", opt_usage_and_exit, "...", "This message"); opt_register_arg("--longname", opt_set_charp, opt_show_charp, &longname, "a really long option default"); opt_register_arg("--shortname", opt_set_charp, opt_show_charp, &shortname, "a short option default"); output = opt_usage("my name", "ExTrA Args"); diag("%s", output); ok1(strstr(output, "Usage: my name")); ok1(strstr(output, "--jjj|-j|--lll|-l ")); ok1(strstr(output, "ExTrA Args")); ok1(strstr(output, "-a ")); ok1(strstr(output, " Description of a\n")); ok1(strstr(output, "-b ")); ok1(strstr(output, " Description of b (default: b)\n")); ok1(strstr(output, "--ddd ")); ok1(strstr(output, " Description of ddd\n")); ok1(strstr(output, "--eee ")); ok1(strstr(output, " (default: eee)\n")); ok1(strstr(output, "long table options:\n")); ok1(strstr(output, "--ggg|-g ")); ok1(strstr(output, " Description of ggg\n")); ok1(strstr(output, "-h|--hhh ")); ok1(strstr(output, " Description of hhh\n")); ok1(strstr(output, "--kkk|-k")); ok1(strstr(output, "magic kkk option")); /* This entry is hidden. */ ok1(!strstr(output, "--mmm|-m")); free(output); /* NULL should use string from registered options. */ output = opt_usage("my name", NULL); diag("%s", output); ok1(strstr(output, "Usage: my name")); ok1(strstr(output, "--jjj|-j|--lll|-l ")); ok1(strstr(output, "...")); ok1(strstr(output, "-a ")); ok1(strstr(output, " Description of a\n")); ok1(strstr(output, "-b ")); ok1(strstr(output, " Description of b (default: b)\n")); ok1(strstr(output, "--ddd ")); ok1(strstr(output, " Description of ddd\n")); ok1(strstr(output, "--eee ")); ok1(strstr(output, " (default: eee)\n")); ok1(strstr(output, "long table options:\n")); ok1(strstr(output, "--ggg|-g ")); ok1(strstr(output, " Description of ggg\n")); ok1(strstr(output, "-h|--hhh ")); ok1(strstr(output, " Description of hhh\n")); ok1(strstr(output, "--kkk|-k")); ok1(strstr(output, "magic kkk option")); ok1(strstr(output, "--longname")); ok1(strstr(output, "a really long option default")); ok1(strstr(output, "(default: \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"...)")); ok1(strstr(output, "--shortname")); ok1(strstr(output, "a short option default")); ok1(strstr(output, "(default: \"shortname\")")); /* This entry is hidden. */ ok1(!strstr(output, "--mmm|-m")); free(output); reset_options(); /* Empty table test. */ output = opt_usage("nothing", NULL); ok1(strstr(output, "Usage: nothing \n")); free(output); /* No short args. */ opt_register_noarg("--aaa", test_noarg, NULL, "AAAAll"); output = opt_usage("onearg", NULL); ok1(strstr(output, "Usage: onearg \n")); ok1(strstr(output, "--aaa")); ok1(strstr(output, "AAAAll")); free(output); free(shortname); free(longname); return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/run.c000066400000000000000000000220271226556647300206330ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. 
*/ #include #include #include #include #include #include #include "utils.h" static void reset_options(void) { free(opt_table); opt_table = NULL; opt_count = opt_num_short = opt_num_short_arg = opt_num_long = 0; free(err_output); err_output = NULL; } int main(int argc, char *argv[]) { const char *myname = argv[0]; plan_tests(215); /* Simple short arg.*/ opt_register_noarg("-a", test_noarg, NULL, "All"); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 1); /* Simple long arg. */ opt_register_noarg("--aaa", test_noarg, NULL, "AAAAll"); ok1(parse_args(&argc, &argv, "--aaa", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 2); /* Both long and short args. */ opt_register_noarg("--aaa|-a", test_noarg, NULL, "AAAAAAll"); ok1(parse_args(&argc, &argv, "--aaa", "-a", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 4); /* Extra arguments preserved. */ ok1(parse_args(&argc, &argv, "--aaa", "-a", "extra", "args", NULL)); ok1(argc == 3); ok1(argv[0] == myname); ok1(strcmp(argv[1], "extra") == 0); ok1(strcmp(argv[2], "args") == 0); ok1(test_cb_called == 6); /* Malformed versions. */ ok1(!parse_args(&argc, &argv, "--aaa=arg", NULL)); ok1(strstr(err_output, ": --aaa: doesn't allow an argument")); ok1(!parse_args(&argc, &argv, "--aa", NULL)); ok1(strstr(err_output, ": --aa: unrecognized option")); ok1(!parse_args(&argc, &argv, "--aaargh", NULL)); ok1(strstr(err_output, ": --aaargh: unrecognized option")); /* Argument variants. */ reset_options(); test_cb_called = 0; opt_register_arg("-a|--aaa", test_arg, NULL, "aaa", "AAAAAAll"); ok1(parse_args(&argc, &argv, "--aaa", "aaa", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "--aaa=aaa", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(test_cb_called == 2); ok1(parse_args(&argc, &argv, "-a", "aaa", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(test_cb_called == 3); /* Malformed versions. */ ok1(!parse_args(&argc, &argv, "-a", NULL)); ok1(strstr(err_output, ": -a: requires an argument")); ok1(!parse_args(&argc, &argv, "--aaa", NULL)); ok1(strstr(err_output, ": --aaa: requires an argument")); ok1(!parse_args(&argc, &argv, "--aa", NULL)); ok1(strstr(err_output, ": --aa: unrecognized option")); ok1(!parse_args(&argc, &argv, "--aaargh", NULL)); ok1(strstr(err_output, ": --aaargh: unrecognized option")); /* Now, tables. */ /* Short table: */ reset_options(); test_cb_called = 0; opt_register_table(short_table, NULL); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 1); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "-b", NULL) == false); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "-b", "b", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 2); /* Long table: */ reset_options(); test_cb_called = 0; opt_register_table(long_table, NULL); ok1(parse_args(&argc, &argv, "--ddd", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 1); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "--eee", NULL) == false); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "--eee", "eee", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 2); /* Short and long, both. 
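Entries registered with both spellings, such as "--ggg|-g", must be reachable through either form, and "-h|--hhh" must still insist on its argument.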
*/ reset_options(); test_cb_called = 0; opt_register_table(long_and_short_table, NULL); ok1(parse_args(&argc, &argv, "-g", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "--ggg", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 2); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "-h", NULL) == false); ok1(test_cb_called == 2); ok1(parse_args(&argc, &argv, "-h", "hhh", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 3); ok1(parse_args(&argc, &argv, "--hhh", NULL) == false); ok1(test_cb_called == 3); ok1(parse_args(&argc, &argv, "--hhh", "hhh", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 4); /* Those will all work as tables. */ test_cb_called = 0; reset_options(); opt_register_table(subtables, NULL); ok1(parse_args(&argc, &argv, "-a", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 1); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "-b", NULL) == false); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "-b", "b", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 2); ok1(parse_args(&argc, &argv, "--ddd", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 3); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "--eee", NULL) == false); ok1(test_cb_called == 3); ok1(parse_args(&argc, &argv, "--eee", "eee", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 4); /* Short and long, both. */ ok1(parse_args(&argc, &argv, "-g", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 5); ok1(parse_args(&argc, &argv, "--ggg", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 6); /* This one needs an arg. */ ok1(parse_args(&argc, &argv, "-h", NULL) == false); ok1(test_cb_called == 6); ok1(parse_args(&argc, &argv, "-h", "hhh", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 7); ok1(parse_args(&argc, &argv, "--hhh", NULL) == false); ok1(test_cb_called == 7); ok1(parse_args(&argc, &argv, "--hhh", "hhh", NULL)); ok1(argc == 1); ok1(argv[0] == myname); ok1(argv[1] == NULL); ok1(test_cb_called == 8); /* Now the tricky one: -? must not be confused with an unknown option */ test_cb_called = 0; reset_options(); /* glibc's getopt does not handle ? with arguments. */ opt_register_noarg("-?", test_noarg, NULL, "Help"); ok1(parse_args(&argc, &argv, "-?", NULL)); ok1(test_cb_called == 1); ok1(parse_args(&argc, &argv, "-a", NULL) == false); ok1(test_cb_called == 1); ok1(strstr(err_output, ": -a: unrecognized option")); ok1(parse_args(&argc, &argv, "--aaaa", NULL) == false); ok1(test_cb_called == 1); ok1(strstr(err_output, ": --aaaa: unrecognized option")); test_cb_called = 0; reset_options(); /* Corner cases involving short arg parsing weirdness. 
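Bundled short options are expanded one letter at a time (-aa behaves like -a -a), and a short option that takes an argument swallows the rest of the word, so -bbbb hands "bbb" to the callback.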
*/ opt_register_noarg("-a|--aaa", test_noarg, NULL, "a"); opt_register_arg("-b|--bbb", test_arg, NULL, "bbb", "b"); opt_register_arg("-c|--ccc", test_arg, NULL, "aaa", "c"); /* -aa == -a -a */ ok1(parse_args(&argc, &argv, "-aa", NULL)); ok1(test_cb_called == 2); ok1(parse_args(&argc, &argv, "-aab", NULL) == false); ok1(test_cb_called == 4); ok1(strstr(err_output, ": -b: requires an argument")); ok1(parse_args(&argc, &argv, "-bbbb", NULL)); ok1(test_cb_called == 5); ok1(parse_args(&argc, &argv, "-aabbbb", NULL)); ok1(test_cb_called == 8); ok1(parse_args(&argc, &argv, "-aabbbb", "-b", "bbb", NULL)); ok1(test_cb_called == 12); ok1(parse_args(&argc, &argv, "-aabbbb", "--bbb", "bbb", NULL)); ok1(test_cb_called == 16); ok1(parse_args(&argc, &argv, "-aabbbb", "--bbb=bbb", NULL)); ok1(test_cb_called == 20); ok1(parse_args(&argc, &argv, "-aacaaa", NULL)); ok1(test_cb_called == 23); ok1(parse_args(&argc, &argv, "-aacaaa", "-a", NULL)); ok1(test_cb_called == 27); ok1(parse_args(&argc, &argv, "-aacaaa", "--bbb", "bbb", "-aacaaa", NULL)); ok1(test_cb_called == 34); test_cb_called = 0; reset_options(); /* -- and POSIXLY_CORRECT */ opt_register_noarg("-a|--aaa", test_noarg, NULL, "a"); ok1(parse_args(&argc, &argv, "-a", "--", "-a", NULL)); ok1(test_cb_called == 1); ok1(argc == 2); ok1(strcmp(argv[1], "-a") == 0); ok1(!argv[2]); unsetenv("POSIXLY_CORRECT"); ok1(parse_args(&argc, &argv, "-a", "somearg", "-a", "--", "-a", NULL)); ok1(test_cb_called == 3); ok1(argc == 3); ok1(strcmp(argv[1], "somearg") == 0); ok1(strcmp(argv[2], "-a") == 0); ok1(!argv[3]); setenv("POSIXLY_CORRECT", "1", 1); ok1(parse_args(&argc, &argv, "-a", "somearg", "-a", "--", "-a", NULL)); ok1(test_cb_called == 4); ok1(argc == 5); ok1(strcmp(argv[1], "somearg") == 0); ok1(strcmp(argv[2], "-a") == 0); ok1(strcmp(argv[3], "--") == 0); ok1(strcmp(argv[4], "-a") == 0); ok1(!argv[5]); /* parse_args allocates argv */ free(argv); return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/opt/test/utils.c000066400000000000000000000053061226556647300211700ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include "config.h" #include #include #include #include #include #include #include #include "utils.h" unsigned int test_cb_called; char *test_noarg(void *arg) { test_cb_called++; return NULL; } char *test_arg(const char *optarg, const char *arg) { test_cb_called++; ok1(strcmp(optarg, arg) == 0); return NULL; } void show_arg(char buf[OPT_SHOW_LEN], const char *arg) { strncpy(buf, arg, OPT_SHOW_LEN); } char *err_output = NULL; void save_err_output(const char *fmt, ...) { va_list ap; char *p; va_start(ap, fmt); /* Check return, for fascist gcc */ if (vasprintf(&p, fmt, ap) == -1) p = NULL; va_end(ap); if (err_output) { err_output = realloc(err_output, strlen(err_output) + strlen(p) + 1); strcat(err_output, p); free(p); } else err_output = p; } static bool allocated = false; bool parse_args(int *argc, char ***argv, ...) { char **a; va_list ap; va_start(ap, argv); *argc = 1; a = malloc(sizeof(*a) * (*argc + 1)); a[0] = (*argv)[0]; while ((a[*argc] = va_arg(ap, char *)) != NULL) { (*argc)++; a = realloc(a, sizeof(*a) * (*argc + 1)); } if (allocated) free(*argv); *argv = a; allocated = true; /* Re-set before parsing. 
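parse_args() is called many times per test binary, so the global optind is cleared before each opt_parse() run on the freshly rebuilt argv.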
*/ optind = 0; return opt_parse(argc, *argv, save_err_output); } struct opt_table short_table[] = { /* Short opts, different args. */ OPT_WITHOUT_ARG("-a", test_noarg, "a", "Description of a"), OPT_WITH_ARG("-b", test_arg, show_arg, "b", "Description of b"), OPT_ENDTABLE }; struct opt_table long_table[] = { /* Long opts, different args. */ OPT_WITHOUT_ARG("--ddd", test_noarg, "ddd", "Description of ddd"), OPT_WITH_ARG("--eee ", test_arg, show_arg, "eee", ""), OPT_ENDTABLE }; struct opt_table long_and_short_table[] = { /* Short and long, different args. */ OPT_WITHOUT_ARG("--ggg|-g", test_noarg, "ggg", "Description of ggg"), OPT_WITH_ARG("-h|--hhh", test_arg, NULL, "hhh", "Description of hhh"), OPT_ENDTABLE }; /* Sub-table test. */ struct opt_table subtables[] = { /* Two short, and two long long, no description */ OPT_WITH_ARG("--jjj|-j|--lll|-l", test_arg, show_arg, "jjj", ""), /* Hidden option */ OPT_WITH_ARG("--mmm|-m", test_arg, show_arg, "mmm", opt_hidden), OPT_SUBTABLE(short_table, NULL), OPT_SUBTABLE(long_table, "long table options"), OPT_SUBTABLE(long_and_short_table, NULL), OPT_ENDTABLE }; bfgminer-bfgminer-3.10.0/ccan/opt/test/utils.h000066400000000000000000000011251226556647300211700ustar00rootroot00000000000000#ifndef CCAN_OPT_TEST_UTILS_H #define CCAN_OPT_TEST_UTILS_H #include #include bool parse_args(int *argc, char ***argv, ...); extern char *err_output; void save_err_output(const char *fmt, ...); extern unsigned int test_cb_called; char *test_noarg(void *arg); char *test_arg(const char *optarg, const char *arg); void show_arg(char buf[OPT_SHOW_LEN], const char *arg); extern struct opt_table short_table[]; extern struct opt_table long_table[]; extern struct opt_table long_and_short_table[]; extern struct opt_table subtables[]; #endif /* CCAN_OPT_TEST_UTILS_H */ bfgminer-bfgminer-3.10.0/ccan/opt/usage.c000066400000000000000000000056511226556647300201600ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. See LICENSE for more details. */ #include #include #include #include #include #include "private.h" /* We only use this for pointer comparisons. */ const char opt_hidden[1]; static unsigned write_short_options(char *str) { unsigned int i, num = 0; const char *p; for (p = first_sopt(&i); p; p = next_sopt(p, &i)) { if (opt_table[i].desc != opt_hidden) str[num++] = *p; } return num; } #define OPT_SPACE_PAD " " /* FIXME: Get all purdy. */ char *opt_usage(const char *argv0, const char *extra) { unsigned int i, num, len; char *ret, *p; if (!extra) { extra = ""; for (i = 0; i < opt_count; i++) { if (opt_table[i].cb == (void *)opt_usage_and_exit && opt_table[i].u.carg) { extra = opt_table[i].u.carg; break; } } } /* An overestimate of our length. 
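The total adds the worst-case width of every option name, description, padding and default string, so the sprintf() calls below cannot overrun the malloc'd buffer.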
*/ len = strlen("Usage: %s ") + strlen(argv0) + strlen("[-%.*s]") + opt_num_short + 1 + strlen(" ") + strlen(extra) + strlen("\n"); for (i = 0; i < opt_count; i++) { if (opt_table[i].type == OPT_SUBTABLE) { len += strlen("\n") + strlen(opt_table[i].desc) + strlen(":\n"); } else if (opt_table[i].desc != opt_hidden) { len += strlen(opt_table[i].names) + strlen(" "); len += strlen(OPT_SPACE_PAD) + strlen(opt_table[i].desc) + 1; if (opt_table[i].show) { len += strlen("(default: %s)") + OPT_SHOW_LEN + sizeof("..."); } len += strlen("\n"); } } p = ret = malloc(len); if (!ret) return NULL; p += sprintf(p, "Usage: %s", argv0); p += sprintf(p, " [-"); num = write_short_options(p); if (num) { p += num; p += sprintf(p, "]"); } else { /* Remove start of single-entry options */ p -= 3; } if (extra) p += sprintf(p, " %s", extra); p += sprintf(p, "\n"); for (i = 0; i < opt_count; i++) { if (opt_table[i].desc == opt_hidden) continue; if (opt_table[i].type == OPT_SUBTABLE) { p += sprintf(p, "%s:\n", opt_table[i].desc); continue; } len = sprintf(p, "%s", opt_table[i].names); if (opt_table[i].type == OPT_HASARG && !strchr(opt_table[i].names, ' ') && !strchr(opt_table[i].names, '=')) len += sprintf(p + len, " "); len += sprintf(p + len, "%.*s", len < strlen(OPT_SPACE_PAD) ? (unsigned)strlen(OPT_SPACE_PAD) - len : 1, OPT_SPACE_PAD); len += sprintf(p + len, "%s", opt_table[i].desc); if (opt_table[i].show) { char buf[OPT_SHOW_LEN + sizeof("...")]; strcpy(buf + OPT_SHOW_LEN, "..."); opt_table[i].show(buf, opt_table[i].u.arg); len += sprintf(p + len, " (default: %s)", buf); } p += len; p += sprintf(p, "\n"); } *p = '\0'; return ret; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/000077500000000000000000000000001226556647300203635ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/LICENSE000066400000000000000000000636371226556647300214070ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. 
To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. 
For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. 
b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. 
If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. 
A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. 
Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. 
For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the library's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. <signature of Ty Coon>, 1 April 1990 Ty Coon, President of Vice That's all there is to it!
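As an illustration of the "How to Apply These Terms" instructions above, here is a minimal sketch of how that notice block might be attached to the top of a C source file; the file name, year, and author shown are hypothetical placeholders and are not part of the license text itself.

/* example-widget.c - hypothetical example of applying the LGPL notice above.
 * Copyright (C) 2014  A. N. Author  (hypothetical year and author)
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This library is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */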
bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/_info000066400000000000000000000112271226556647300214030ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include #include "config.h" /** * typesafe_cb - macros for safe callbacks. * * The basis of the typesafe_cb header is typesafe_cb_cast(): a * conditional cast macro. If an expression exactly matches a given * type, it is cast to the target type, otherwise it is left alone. * * This allows us to create functions which take a small number of * specific types, rather than being forced to use a void *. In * particular, it is useful for creating typesafe callbacks as the * helpers typesafe_cb(), typesafe_cb_preargs() and * typesafe_cb_postargs() demonstrate. * * The standard way of passing arguments to callback functions in C is * to use a void pointer, which the callback then casts back to the * expected type. This unfortunately subverts the type checking the * compiler would perform if it were a direct call. Here's an example: * * static void my_callback(void *_obj) * { * struct obj *obj = _obj; * ... * } * ... * register_callback(my_callback, &my_obj); * * If we wanted to use the natural type for my_callback (ie. "void * my_callback(struct obj *obj)"), we could make register_callback() * take a void * as its first argument, but this would subvert all * type checking. We really want register_callback() to accept only * the exactly correct function type to match the argument, or a * function which takes a void *. * * This is where typesafe_cb() comes in: it uses typesafe_cb_cast() to * cast the callback function if it matches the argument type: * * void _register_callback(void (*cb)(void *arg), void *arg); * #define register_callback(cb, arg) \ * _register_callback(typesafe_cb(void, void *, (cb), (arg)), \ * (arg)) * * On compilers which don't support the extensions required * typesafe_cb_cast() and friend become an unconditional cast, so your * code will compile but you won't get type checking. * * Example: * #include * #include * #include * * // Generic callback infrastructure. * struct callback { * struct callback *next; * int value; * int (*callback)(int value, void *arg); * void *arg; * }; * static struct callback *callbacks; * * static void _register_callback(int value, int (*cb)(int, void *), * void *arg) * { * struct callback *new = malloc(sizeof(*new)); * new->next = callbacks; * new->value = value; * new->callback = cb; * new->arg = arg; * callbacks = new; * } * #define register_callback(value, cb, arg) \ * _register_callback(value, \ * typesafe_cb_preargs(int, void *, \ * (cb), (arg), int),\ * (arg)) * * static struct callback *find_callback(int value) * { * struct callback *i; * * for (i = callbacks; i; i = i->next) * if (i->value == value) * return i; * return NULL; * } * * // Define several silly callbacks. Note they don't use void *! * #define DEF_CALLBACK(name, op) \ * static int name(int val, int *arg) \ * { \ * printf("%s", #op); \ * return val op *arg; \ * } * DEF_CALLBACK(multiply, *); * DEF_CALLBACK(add, +); * DEF_CALLBACK(divide, /); * DEF_CALLBACK(sub, -); * DEF_CALLBACK(or, |); * DEF_CALLBACK(and, &); * DEF_CALLBACK(xor, ^); * DEF_CALLBACK(assign, =); * * // Silly game to find the longest chain of values. 
* int main(int argc, char *argv[]) * { * int i, run = 1, num = argv[1] ? atoi(argv[1]) : 0; * * for (i = 1; i < 1024;) { * // Since run is an int, compiler checks "add" does too. * register_callback(i++, add, &run); * register_callback(i++, divide, &run); * register_callback(i++, sub, &run); * register_callback(i++, multiply, &run); * register_callback(i++, or, &run); * register_callback(i++, and, &run); * register_callback(i++, xor, &run); * register_callback(i++, assign, &run); * } * * printf("%i ", num); * while (run < 56) { * struct callback *cb = find_callback(num % i); * if (!cb) { * printf("-> STOP\n"); * return 1; * } * num = cb->callback(num, cb->arg); * printf("->%i ", num); * run++; * } * printf("-> Winner!\n"); * return 0; * } * * License: LGPL (2 or any later version) * Author: Rusty Russell */ int main(int argc, char *argv[]) { if (argc != 2) return 1; if (strcmp(argv[1], "depends") == 0) { return 0; } return 1; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/000077500000000000000000000000001226556647300213425ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-cast_if_type-promotable.c000066400000000000000000000014511226556647300307430ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include static void _set_some_value(void *val) { } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, long, (expr))) int main(int argc, char *argv[]) { #ifdef FAIL bool x = 0; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else long x = 0; #endif set_some_value(x); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb-int.c000066400000000000000000000016341226556647300272070ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include void _callback(void (*fn)(void *arg), void *arg); void _callback(void (*fn)(void *arg), void *arg) { fn(arg); } /* Callback is set up to warn if arg isn't a pointer (since it won't * pass cleanly to _callback's second arg. */ #define callback(fn, arg) \ _callback(typesafe_cb(void, (fn), (arg)), (arg)) void my_callback(int something); void my_callback(int something) { } int main(int argc, char *argv[]) { #ifdef FAIL /* This fails due to arg, not due to cast. */ callback(my_callback, 100); #endif return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb.c000066400000000000000000000020271226556647300264140ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. 
*/ #include #include static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void my_callback(char *p) { } int main(int argc, char *argv[]) { char str[] = "hello world"; #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; /* This should work always. */ register_callback(my_callback, str); /* This will fail with FAIL defined */ register_callback(my_callback, p); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast-multi.c000066400000000000000000000016541226556647300305630ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; struct other { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { #ifdef FAIL struct other #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else struct foo #endif *arg = NULL; take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, arg)); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb_cast.c000066400000000000000000000015141226556647300274260ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include void _set_some_value(void *val); void _set_some_value(void *val) { } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) int main(int argc, char *argv[]) { #ifdef FAIL int x = 0; set_some_value(x); #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else void *p = 0; set_some_value(p); #endif return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb_postargs.c000066400000000000000000000016451226556647300303430ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. 
*/ #include #include static void _register_callback(void (*cb)(void *arg, int x), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) static void my_callback(char *p, int x) { } int main(int argc, char *argv[]) { #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; register_callback(my_callback, p); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_fail-typesafe_cb_preargs.c000066400000000000000000000016451226556647300301440ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include static void _register_callback(void (*cb)(int x, void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void my_callback(int x, char *p) { } int main(int argc, char *argv[]) { #ifdef FAIL int *p; #if !HAVE_TYPEOF||!HAVE_BUILTIN_CHOOSE_EXPR||!HAVE_BUILTIN_TYPES_COMPATIBLE_P #error "Unfortunately we don't fail if typesafe_cb_cast is a noop." #endif #else char *p; #endif p = NULL; register_callback(my_callback, p); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_ok-typesafe_cb-NULL.c000066400000000000000000000013641226556647300266650ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include /* NULL args for callback function should be OK for normal and _def. */ static void _register_callback(void (*cb)(const void *arg), const void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, const void *, (cb), (arg)), (arg)) int main(int argc, char *argv[]) { register_callback(NULL, "hello world"); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_ok-typesafe_cb-undefined.c000066400000000000000000000027031226556647300300520ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include /* const args in callbacks should be OK. 
*/ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) struct undefined; static void my_callback(struct undefined *undef) { } static void my_callback_pre(int x, struct undefined *undef) { } static void my_callback_post(struct undefined *undef, int x) { } int main(int argc, char *argv[]) { struct undefined *handle = NULL; register_callback(my_callback, handle); register_callback_pre(my_callback_pre, handle); register_callback_post(my_callback_post, handle); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_ok-typesafe_cb-vars.c000066400000000000000000000031301226556647300270570ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include /* const args in callbacks should be OK. */ static void _register_callback(void (*cb)(void *arg), void *arg) { } #define register_callback(cb, arg) \ _register_callback(typesafe_cb(void, void *, (cb), (arg)), (arg)) static void _register_callback_pre(void (*cb)(int x, void *arg), void *arg) { } #define register_callback_pre(cb, arg) \ _register_callback_pre(typesafe_cb_preargs(void, void *, (cb), (arg), int), (arg)) static void _register_callback_post(void (*cb)(void *arg, int x), void *arg) { } #define register_callback_post(cb, arg) \ _register_callback_post(typesafe_cb_postargs(void, void *, (cb), (arg), int), (arg)) struct undefined; static void my_callback(struct undefined *undef) { } static void my_callback_pre(int x, struct undefined *undef) { } static void my_callback_post(struct undefined *undef, int x) { } int main(int argc, char *argv[]) { struct undefined *handle = NULL; void (*cb)(struct undefined *undef) = my_callback; void (*pre)(int x, struct undefined *undef) = my_callback_pre; void (*post)(struct undefined *undef, int x) = my_callback_post; register_callback(cb, handle); register_callback_pre(pre, handle); register_callback_post(post, handle); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/compile_ok-typesafe_cb_cast.c000066400000000000000000000020111226556647300271150ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include struct foo { int x; }; struct bar { int x; }; struct baz { int x; }; struct any { int x; }; static void take_any(struct any *any) { } int main(int argc, char *argv[]) { /* Otherwise we get unused warnings for these. 
*/ struct foo *foo = NULL; struct bar *bar = NULL; struct baz *baz = NULL; take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, foo)); take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, bar)); take_any(typesafe_cb_cast3(struct any *, struct foo *, struct bar *, struct baz *, baz)); return 0; } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/test/run.c000066400000000000000000000053021226556647300223120ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. */ #include #include #include #include static char dummy = 0; /* The example usage. */ static void _set_some_value(void *val) { ok1(val == &dummy); } #define set_some_value(expr) \ _set_some_value(typesafe_cb_cast(void *, unsigned long, (expr))) static void _callback_onearg(void (*fn)(void *arg), void *arg) { fn(arg); } static void _callback_preargs(void (*fn)(int a, int b, void *arg), void *arg) { fn(1, 2, arg); } static void _callback_postargs(void (*fn)(void *arg, int a, int b), void *arg) { fn(arg, 1, 2); } #define callback_onearg(cb, arg) \ _callback_onearg(typesafe_cb(void, void *, (cb), (arg)), (arg)) #define callback_preargs(cb, arg) \ _callback_preargs(typesafe_cb_preargs(void, void *, (cb), (arg), int, int), (arg)) #define callback_postargs(cb, arg) \ _callback_postargs(typesafe_cb_postargs(void, void *, (cb), (arg), int, int), (arg)) static void my_callback_onearg(char *p) { ok1(strcmp(p, "hello world") == 0); } static void my_callback_preargs(int a, int b, char *p) { ok1(a == 1); ok1(b == 2); ok1(strcmp(p, "hello world") == 0); } static void my_callback_postargs(char *p, int a, int b) { ok1(a == 1); ok1(b == 2); ok1(strcmp(p, "hello world") == 0); } /* This is simply a compile test; we promised typesafe_cb_cast can be in a * static initializer. */ struct callback_onearg { void (*fn)(void *arg); const void *arg; }; struct callback_onearg cb_onearg = { typesafe_cb(void, void *, my_callback_onearg, (char *)(intptr_t)"hello world"), "hello world" }; struct callback_preargs { void (*fn)(int a, int b, void *arg); const void *arg; }; struct callback_preargs cb_preargs = { typesafe_cb_preargs(void, void *, my_callback_preargs, (char *)(intptr_t)"hi", int, int), "hi" }; struct callback_postargs { void (*fn)(void *arg, int a, int b); const void *arg; }; struct callback_postargs cb_postargs = { typesafe_cb_postargs(void, void *, my_callback_postargs, (char *)(intptr_t)"hi", int, int), "hi" }; int main(int argc, char *argv[]) { void *p = &dummy; unsigned long l = (unsigned long)p; char str[] = "hello world"; plan_tests(2 + 1 + 3 + 3); set_some_value(p); set_some_value(l); callback_onearg(my_callback_onearg, str); callback_preargs(my_callback_preargs, str); callback_postargs(my_callback_postargs, str); return exit_status(); } bfgminer-bfgminer-3.10.0/ccan/typesafe_cb/typesafe_cb.h000066400000000000000000000122571226556647300230270ustar00rootroot00000000000000/* * Copyright 2011 Rusty Russell * * This library is free software; you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the Free * Software Foundation; either version 2.1 of the License, or (at your option) * any later version. See LICENSE for more details. 
*/ #ifndef CCAN_TYPESAFE_CB_H #define CCAN_TYPESAFE_CB_H #include "config.h" #if HAVE_TYPEOF && HAVE_BUILTIN_CHOOSE_EXPR && HAVE_BUILTIN_TYPES_COMPATIBLE_P /** * typesafe_cb_cast - only cast an expression if it matches a given type * @desttype: the type to cast to * @oktype: the type we allow * @expr: the expression to cast * * This macro is used to create functions which allow multiple types. * The result of this macro is used somewhere that a @desttype type is * expected: if @expr is exactly of type @oktype, then it will be * cast to @desttype type, otherwise left alone. * * This macro can be used in static initializers. * * This is merely useful for warnings: if the compiler does not * support the primitives required for typesafe_cb_cast(), it becomes an * unconditional cast, and the @oktype argument is not used. In * particular, this means that @oktype can be a type which uses the * "typeof": it will not be evaluated if typeof is not supported. * * Example: * // We can take either an unsigned long or a void *. * void _set_some_value(void *val); * #define set_some_value(e) \ * _set_some_value(typesafe_cb_cast(void *, unsigned long, (e))) */ #define typesafe_cb_cast(desttype, oktype, expr) \ __builtin_choose_expr( \ __builtin_types_compatible_p(__typeof__(0?(expr):(expr)), \ oktype), \ (desttype)(expr), (expr)) #else #define typesafe_cb_cast(desttype, oktype, expr) ((desttype)(expr)) #endif /** * typesafe_cb_cast3 - only cast an expression if it matches given types * @desttype: the type to cast to * @ok1: the first type we allow * @ok2: the second type we allow * @ok3: the third type we allow * @expr: the expression to cast * * This is a convenient wrapper for multiple typesafe_cb_cast() calls. * You can chain them inside each other (ie. use typesafe_cb_cast() * for expr) if you need more than 3 arguments. * * Example: * // We can take either a long, unsigned long, void * or a const void *. * void _set_some_value(void *val); * #define set_some_value(expr) \ * _set_some_value(typesafe_cb_cast3(void *, \ * long, unsigned long, const void *,\ * (expr))) */ #define typesafe_cb_cast3(desttype, ok1, ok2, ok3, expr) \ typesafe_cb_cast(desttype, ok1, \ typesafe_cb_cast(desttype, ok2, \ typesafe_cb_cast(desttype, ok3, \ (expr)))) /** * typesafe_cb - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * If a callback function takes a single argument, this macro does * appropriate casts to a function which takes a single atype argument if the * callback provided matches the @arg. * * It is assumed that @arg is of pointer type: usually @arg is passed * or assigned to a void * elsewhere anyway. * * Example: * void _register_callback(void (*fn)(void *arg), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb(void, void *, (fn), (arg)), (arg)) */ #define typesafe_cb(rtype, atype, fn, arg) \ typesafe_cb_cast(rtype (*)(atype), \ rtype (*)(__typeof__(arg)), \ (fn)) /** * typesafe_cb_preargs - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * This is a version of typesafe_cb() for callbacks that take other arguments * before the @arg.
* * Example: * void _register_callback(void (*fn)(int, void *arg), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb_preargs(void, void *, (fn), \ * (arg), int), \ * (arg)) */ #define typesafe_cb_preargs(rtype, atype, fn, arg, ...) \ typesafe_cb_cast(rtype (*)(__VA_ARGS__, atype), \ rtype (*)(__VA_ARGS__, __typeof__(arg)), \ (fn)) /** * typesafe_cb_postargs - cast a callback function if it matches the arg * @rtype: the return type of the callback function * @atype: the (pointer) type which the callback function expects. * @fn: the callback function to cast * @arg: the (pointer) argument to hand to the callback function. * * This is a version of typesafe_cb() for callbacks that take other arguments * after the @arg. * * Example: * void _register_callback(void (*fn)(void *arg, int), void *arg); * #define register_callback(fn, arg) \ * _register_callback(typesafe_cb_postargs(void, void *, (fn), \ * (arg), int), \ * (arg)) */ #define typesafe_cb_postargs(rtype, atype, fn, arg, ...) \ typesafe_cb_cast(rtype (*)(atype, __VA_ARGS__), \ rtype (*)(__typeof__(arg), __VA_ARGS__), \ (fn)) #endif /* CCAN_TYPESAFE_CB_H */ bfgminer-bfgminer-3.10.0/compat.h000066400000000000000000000145601226556647300166350ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef __COMPAT_H__ #define __COMPAT_H__ #include "config.h" #include #if !(defined(WIN32) || defined(unix)) #define unix #endif #if defined(LL_FOREACH) && !defined(LL_FOREACH2) // Missing from uthash before 1.9.7 #define LL_DELETE2(head,del,next) \ do { \ LDECLTYPE(head) _tmp; \ if ((head) == (del)) { \ (head)=(head)->next; \ } else { \ _tmp = head; \ while (_tmp->next && (_tmp->next != (del))) { \ _tmp = _tmp->next; \ } \ if (_tmp->next) { \ _tmp->next = ((del)->next); \ } \ } \ } while (0) #define LL_FOREACH2(head,el,next) \ for(el=head;el;el=(el)->next) #define LL_FOREACH_SAFE2(head,el,tmp,next) \ for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) #define LL_PREPEND2(head,add,next) \ do { \ (add)->next = head; \ head = add; \ } while (0) #endif #ifdef WIN32 #include #include #include #include #include #include #ifndef __maybe_unused #define __maybe_unused __attribute__((unused)) #endif #ifndef timersub #define timersub(a, b, result) \ do { \ (result)->tv_sec = (a)->tv_sec - (b)->tv_sec; \ (result)->tv_usec = (a)->tv_usec - (b)->tv_usec; \ if ((result)->tv_usec < 0) { \ --(result)->tv_sec; \ (result)->tv_usec += 1000000; \ } \ } while (0) #endif #ifndef timeradd # define timeradd(a, b, result) \ do { \ (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \ (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \ if ((result)->tv_usec >= 1000000) \ { \ ++(result)->tv_sec; \ (result)->tv_usec -= 1000000; \ } \ } while (0) #endif // Some versions of MingW define this, but don't handle the timeval.tv_sec case that we use #ifdef localtime_r #undef localtime_r #endif // localtime is thread-safe on Windows // We also use this with timeval.tv_sec, which is incorrectly smaller than time_t on Windows // Need to cast to time_t* to suppress warning - actual problem shouldn't be possible in practice #define localtime_r(timep, result) ( \ memcpy(result, \ ( \ (sizeof(*timep) == sizeof(time_t)) \ ?
localtime((time_t*)timep) \ : localtime_convert(*timep) \ ), \ sizeof(*result) \ ) \ ) static inline struct tm *localtime_convert(time_t t) { return localtime(&t); } #endif #ifndef HAVE_NANOSLEEP extern void (*timer_set_now)(struct timeval *); #define cgtime(tvp) timer_set_now(tvp) static inline int nanosleep(const struct timespec *req, struct timespec *rem) { struct timeval tstart; DWORD msecs; cgtime(&tstart); msecs = (req->tv_sec * 1000) + ((999999 + req->tv_nsec) / 1000000); if (SleepEx(msecs, true) == WAIT_IO_COMPLETION) { if (rem) { struct timeval tdone, tnow, tleft; tdone.tv_sec = tstart.tv_sec + req->tv_sec; tdone.tv_usec = tstart.tv_usec + ((999 + req->tv_nsec) / 1000); if (tdone.tv_usec > 1000000) { tdone.tv_usec -= 1000000; ++tdone.tv_sec; } cgtime(&tnow); if (timercmp(&tnow, &tdone, >)) return 0; timersub(&tdone, &tnow, &tleft); rem->tv_sec = tleft.tv_sec; rem->tv_nsec = tleft.tv_usec * 1000; } errno = EINTR; return -1; } return 0; } #undef cgtime #endif #ifndef HAVE_SLEEP static inline int sleep(unsigned int secs) { struct timespec req, rem; req.tv_sec = secs; req.tv_nsec = 0; if (!nanosleep(&req, &rem)) return 0; return rem.tv_sec + (rem.tv_nsec ? 1 : 0); } #endif #ifdef WIN32 enum { PRIO_PROCESS = 0, }; static inline int setpriority(__maybe_unused int which, __maybe_unused int who, __maybe_unused int prio) { return -!SetPriorityClass(GetCurrentProcess(), IDLE_PRIORITY_CLASS); } typedef unsigned long int ulong; typedef unsigned short int ushort; typedef unsigned int uint; #ifndef __SUSECONDS_T_TYPE typedef long suseconds_t; #endif #endif /* WIN32 */ #ifndef HAVE_PTHREAD_CANCEL // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill (handled in util.c) #include #include #define pthread_cancel(pth) pthread_kill(pth, SIGTERM) extern void pthread_testcancel(void); #ifndef PTHREAD_CANCEL_ENABLE #define PTHREAD_CANCEL_ENABLE 0 #define PTHREAD_CANCEL_DISABLE 1 #endif #ifndef PTHREAD_CANCEL_DEFERRED #define PTHREAD_CANCEL_DEFERRED 0 #define PTHREAD_CANCEL_ASYNCHRONOUS 1 #endif #ifndef PTHREAD_CANCELED #define PTHREAD_CANCELED ((void*)-1) #endif #endif #endif /* __COMPAT_H__ */ bfgminer-bfgminer-3.10.0/configure.ac000066400000000000000000001317401226556647300174670ustar00rootroot00000000000000dnl * Copyright 2012-2013 Luke Dashjr dnl * Copyright 2011-2013 Con Kolivas dnl * Copyright 2010-2011 Jeff Garzik dnl * Copyright 2012 Xiangfu dnl * Copyright 2011 Rusty Russell dnl * Copyright 2011 Mark Crichton dnl * dnl * This program is free software; you can redistribute it and/or modify it dnl * under the terms of the GNU General Public License as published by the Free dnl * Software Foundation; either version 3 of the License, or (at your option) dnl * any later version. See COPYING for more details. 
##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## m4_define([v_maj], [3]) m4_define([v_min], [10]) m4_define([v_mic], [0]) ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## m4_define([v_ver], [v_maj.v_min.v_mic]) m4_define([lt_rev], m4_eval(v_maj + v_min)) m4_define([lt_cur], v_mic) m4_define([lt_age], v_min) ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## AC_INIT([bfgminer], [v_ver], [luke-jr+bfgminer@utopios.org]) AC_PREREQ([2.59c]) AC_CANONICAL_SYSTEM AC_CONFIG_MACRO_DIR([m4]) AC_CONFIG_SRCDIR([miner.c]) AC_CONFIG_HEADERS([config.h]) AH_BOTTOM([ #ifdef WIN32 #include #endif ]) AM_INIT_AUTOMAKE([foreign subdir-objects]) m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) AC_USE_SYSTEM_EXTENSIONS ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## m4_ifdef([v_rev], , [m4_define([v_rev], [0])]) m4_ifdef([v_rel], , [m4_define([v_rel], [])]) AC_DEFINE_UNQUOTED(CGMINER_MAJOR_VERSION, [v_maj], [Major version]) AC_DEFINE_UNQUOTED(CGMINER_MINOR_VERSION, [v_min], [Minor version]) AC_DEFINE_UNQUOTED(CGMINER_MINOR_SUBVERSION, [v_mic], [Micro version]) version_info="lt_rev:lt_cur:lt_age" release_info="v_rel" AC_SUBST(version_info) AC_SUBST(release_info) ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## ##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--##--## VMAJ=v_maj AC_SUBST(VMAJ) AC_CANONICAL_BUILD AC_CANONICAL_HOST dnl Make sure anyone changing configure.ac/Makefile.am has a clue AM_MAINTAINER_MODE([enable]) dnl Checks for programs AC_PROG_CC_C99 gl_EARLY AC_PROG_GCC_TRADITIONAL AM_PROG_CC_C_O AC_PROG_RANLIB AC_PROG_CPP gl_INIT AC_SYS_LARGEFILE dnl Checks for header files. 
AC_HEADER_STDC AC_CHECK_HEADERS(syslog.h) AC_CHECK_HEADERS([sys/epoll.h]) AC_CHECK_HEADERS([sys/prctl.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_HEADERS([linux/spi/spidev.h]) AC_CHECK_HEADERS([sys/file.h]) AC_CHECK_MEMBER([struct i2c_msg.buf],[ true ],[ dnl Note the member is different here to avoid caching screwing things up AC_CHECK_MEMBER([struct i2c_msg.len],[ AC_DEFINE([NEED_LINUX_I2C_H],[1],[Defined if linux/i2c.h is needed to supplement linux/i2c-dev.h]) ],[ true ],[ AC_INCLUDES_DEFAULT #include #include ]) ],[ AC_INCLUDES_DEFAULT #include ]) # Setuid AC_CHECK_HEADERS([pwd.h]) # Check for chroot support AC_CHECK_FUNCS([chroot]) AC_CHECK_FUNCS([sleep]) AC_FUNC_ALLOCA driverlist= algolist=SHA256d optlist= has_fpga=no has_asic=no need_binloader=no need_dynclock=no need_lowl_vcom=no need_lowlevel=no need_lowl_hid=no need_lowl_usb=no have_cygwin=false have_win32=false have_macho=false use_udevrules=true have_udevrules=false AUTOSCAN_CPPFLAGS="" AUTOSCAN_LIBS="" DLOPEN_FLAGS="-ldl" WS2_LIBS="" MM_LIBS="" MATH_LIBS="-lm" RT_LIBS="" case $target in amd64-* | x86_64-*) have_x86_32=false have_x86_64=true bitness="64" ;; i386-* | i486-* | i586-* | i686-* | x86-*) have_x86_32=true have_x86_64=false bitness="32" ;; *) have_x86_32=false have_x86_64=false ;; esac case $target in *-*-mingw*) have_win32=true use_udevrules=false DLOPEN_FLAGS="" WS2_LIBS="-lws2_32" MM_LIBS="-lwinmm" AC_DEFINE([_WIN32_WINNT], [0x0501], "WinNT version for XP+ support") AC_DEFINE([FD_SETSIZE], [4096], [Maximum sockets before fd_set overflows]) ;; *-*-cygwin*) have_cygwin=true use_udevrules=false ;; powerpc-*-darwin*) CFLAGS="$CFLAGS -faltivec" have_macho=true use_udevrules=false ;; *-*-darwin*) have_macho=true use_udevrules=false ;; esac m4_define([BFG_INCLUDE], if test "x$2" = "x"; then $1='' else $1="[#]include <$2>" fi ) m4_define([BFG_PREPROC_IFELSE], BFG_INCLUDE([headerinclude], $2) AC_COMPILE_IFELSE([ AC_LANG_PROGRAM([ ${headerinclude} ], [ #if !( $1 ) #error "$1 false in preprocessor" #endif ]) ],[$3],[$4]) ) AC_CHECK_DECL([HASH_ITER],[ AC_CHECK_DECL([DL_CONCAT],[ true ],[ AC_MSG_ERROR([Could not find DL_FOREACH_SAFE - install uthash-dev 1.9.4+]) ],[ #include ]) ],[ AC_MSG_ERROR([Could not find HASH_ITER - please install uthash-dev 1.9.4+]) ],[ #include ]) driverlist="$driverlist cpu/cpumining" cpumining="no" AC_ARG_ENABLE([cpumining], [AC_HELP_STRING([--enable-cpumining],[Build with CPU mining support (default disabled)])], [cpumining=$enableval] ) if test "x$cpumining" = xyes; then AC_DEFINE_UNQUOTED([WANT_CPUMINE], [1], [Enable CPUMINING]) driverlist="$driverlist cpu:asm/has_yasm" driverlist="$driverlist cpu:sse2/have_sse2" fi AM_CONDITIONAL([HAS_CPUMINE], [test x$cpumining = xyes]) driverlist="$driverlist opencl" opencl="no" AC_ARG_ENABLE([opencl], [AC_HELP_STRING([--enable-opencl],[Compile support for OpenCL (default disabled)])], [opencl=$enableval] ) if test "x$opencl" = xyes; then AC_DEFINE([HAVE_OPENCL], [1], [Defined to 1 if OpenCL support is wanted]) fi AM_CONDITIONAL([HAVE_OPENCL], [test x$opencl = xyes]) m4_define([BFG_PTHREAD_FLAG_CHECK], AC_MSG_CHECKING([for $1]) for cflag in ' -pthread' ''; do for lib in ' -lpthread' ' -lwinpthread' ''; do CFLAGS="${save_CFLAGS}${cflag}" LIBS="${save_LIBS}${lib}" AC_LINK_IFELSE([ AC_LANG_PROGRAM([ #include ], [ void *f = $1; ]) ], [ found_pthread=true PTHREAD_FLAGS="${cflag}" PTHREAD_LIBS="${lib}" if test "x${cflag}${lib}" = "x"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([with${cflag}${lib}]) fi $2 break 2 ]) done done if test "x${found_pthread}" = "xfalse"; 
then AC_MSG_RESULT([no]) fi ) save_CFLAGS="${CFLAGS}" save_LIBS="${LIBS}" found_pthread=false BFG_PTHREAD_FLAG_CHECK([pthread_cancel],[ AC_DEFINE([HAVE_PTHREAD_CANCEL], [1], [Define if you have a native pthread_cancel]) ]) if test "x${found_pthread}" = "xfalse"; then BFG_PTHREAD_FLAG_CHECK([pthread_create]) if test "x${found_pthread}" = "xfalse"; then AC_MSG_ERROR([Could not find pthread library - please install libpthread]) fi fi # check for nanosleep here, since it is provided by winpthread AC_CHECK_FUNCS([nanosleep]) CFLAGS="${save_CFLAGS}" LIBS="${save_LIBS}" PKG_CHECK_MODULES([JANSSON],[jansson],[ true ],[ AC_MSG_CHECKING([for jansson in system-default locations]) LIBS="$LIBS -ljansson" AC_TRY_LINK([ #include ],[ json_object(); ],[ AC_MSG_RESULT([found]) JANSSON_LIBS=-ljansson ],[ AC_MSG_RESULT([not found]) AC_MSG_ERROR([Could not find jansson library]) ]) LIBS="${save_LIBS}" ]) AC_SUBST(JANSSON_CFLAGS) AC_SUBST(JANSSON_LIBS) if test "x$opencl" = xyes; then adl="yes" driverlist="$driverlist opencl:sensors/with_sensors" AC_ARG_WITH([sensors], [AC_HELP_STRING([--without-sensors],[Build with libsensors monitoring (default enabled)])], [true],[with_sensors=auto]) if test "x$opencl" != xyes; then with_sensors=no fi if test "x$with_sensors" != xno; then AC_MSG_CHECKING([for libsensors]) save_LIBS="${LIBS}" LIBS="$LIBS -lsensors" AC_LINK_IFELSE([AC_LANG_PROGRAM([ #include #include ],[ const sensors_chip_name *cn; cn = sensors_get_detected_chips(NULL, NULL); ])],[ with_sensors=yes sensors_LIBS="-lsensors" AC_DEFINE([HAVE_SENSORS], [1], [Defined if libsensors was found]) AC_MSG_RESULT([yes]) ],[ with_sensors=no AC_MSG_RESULT([no]) if ! $have_win32; then with_sensors_enableaction="install libsensors" fi ]) LIBS="$save_LIBS" fi AC_SUBST(sensors_LIBS) driverlist="$driverlist opencl:adl/adl" AC_ARG_ENABLE([adl], [AC_HELP_STRING([--disable-adl],[Build without ADL monitoring (default enabled)])], [adl=$enableval] ) if test x$adl = xyes then AC_DEFINE([HAVE_ADL], [1], [Defined if ADL headers were found]) fi else adl="no" fi driverlist="$driverlist bitforce" AC_ARG_ENABLE([bitforce], [AC_HELP_STRING([--disable-bitforce],[Compile support for BitForce (default enabled)])], [bitforce=$enableval], [bitforce=yes] ) if test "x$bitforce" = xyes; then AC_DEFINE([USE_BITFORCE], [1], [Defined to 1 if BitForce support is wanted]) need_lowl_vcom=yes has_fpga=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_BITFORCE], [test x$bitforce = xyes]) driverlist="$driverlist icarus cairnsmore/icarus erupter/icarus antminer/icarus" AC_ARG_ENABLE([icarus], [AC_HELP_STRING([--disable-icarus],[Compile support for Icarus (default enabled)])], [icarus=$enableval], [icarus=yes] ) if test "x$icarus" = xyes; then AC_DEFINE([USE_ICARUS], [1], [Defined to 1 if Icarus support is wanted]) need_dynclock=yes need_lowl_vcom=yes has_fpga=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_ICARUS], [test x$icarus = xyes]) driverlist="$driverlist avalon" avalon="no" AC_ARG_ENABLE([avalon], [AC_HELP_STRING([--disable-avalon],[Compile support for Avalon (default enabled)])], [avalon=$enableval], [avalon=yes] ) if test "x$avalon" = xyes; then AC_DEFINE([USE_AVALON], [1], [Defined to 1 if Avalon support is wanted]) need_lowl_vcom=yes has_asic=yes fi AM_CONDITIONAL([HAS_AVALON], [test x$avalon = xyes]) driverlist="$driverlist knc" AC_ARG_ENABLE([knc], [AC_HELP_STRING([--enable-knc],[Compile support for KnC (default disabled)])], [knc=$enableval], [knc=no] ) if test "x$knc" = xyes; then AC_CHECK_HEADERS([linux/i2c-dev-user.h]) 
AC_CHECK_DECL([i2c_smbus_read_word_data],[true],[ AC_MSG_ERROR([linux/i2c-dev.h header from i2c-tools (NOT linux headers) is required for knc driver]) ],[ #include #ifdef HAVE_LINUX_I2C_DEV_USER_H #include #else #ifdef NEED_LINUX_I2C_H #include #endif #include #endif ]) AC_DEFINE([USE_KNC], [1], [Defined to 1 if KnC support is wanted]) fi AM_CONDITIONAL([USE_KNC], [test x$knc = xyes]) httpsrv=auto AC_ARG_WITH([libmicrohttpd], [AC_HELP_STRING([--without-libmicrohttpd],[Compile support for libmicrohttpd getwork server (default enabled)])], [httpsrv=$withval] ) if test "x$httpsrv" != "xno"; then PKG_CHECK_MODULES([libmicrohttpd],[libmicrohttpd >= 0.9.5],[ AC_DEFINE([USE_LIBMICROHTTPD],[1],[Defined to 1 if libmicrohttpd support is wanted]) httpsrv=yes ],[ httpsrv=no httpsrv_enableaction="install libmicrohttpd 0.9.5+" need_bfg_driver_proxy_enableaction="install libmicrohttpd 0.9.5+" if test "x$httpsrv" = "xyes"; then AC_MSG_ERROR([Unable to find libmicrohttpd 0.9.5+]) else AC_MSG_WARN([libmicrohttpd 0.9.5+ not found; getwork proxy will be unavailable]) fi ]) fi AM_CONDITIONAL([USE_LIBMICROHTTPD], [test x$httpsrv = xyes]) libevent=auto AC_ARG_WITH([libevent], [AC_HELP_STRING([--without-libevent],[Compile support for libevent stratum server (default enabled)])], [libevent=$withval] ) if test "x$libevent" != "xno"; then PKG_CHECK_MODULES([libevent],[libevent >= 2.0.3],[ AC_DEFINE([USE_LIBEVENT],[1],[Defined to 1 if libevent support is wanted]) libevent=yes ],[ libevent=no libevent_enableaction="install libevent 2.0.3+" if test -n "$need_bfg_driver_proxy_enableaction"; then need_bfg_driver_proxy_enableaction="${need_bfg_driver_proxy_enableaction} (getwork) or libevent 2.0.3+ (stratum)" else need_bfg_driver_proxy_enableaction="install libevent 2.0.3+" fi if test "x$libevent" = "xyes"; then AC_MSG_ERROR([Unable to find libevent 2.0.3+]) else AC_MSG_WARN([libevent 2.0.3+ not found; stratum proxy will be unavailable]) fi ]) fi AM_CONDITIONAL([USE_LIBEVENT], [test x$libevent = xyes]) driverlist="$driverlist proxy/need_bfg_driver_proxy" if test x$libevent$httpsrv = xnono; then need_bfg_driver_proxy=no else need_bfg_driver_proxy=yes driverlist="$driverlist proxy:getwork/httpsrv proxy:stratum/libevent" fi AM_CONDITIONAL([NEED_BFG_DRIVER_PROXY], [test x$libevent$httpsrv != xnono]) driverlist="$driverlist modminer" AC_ARG_ENABLE([modminer], [AC_HELP_STRING([--disable-modminer],[Compile support for ModMiner (default enabled)])], [modminer=$enableval], [modminer=yes] ) if test "x$modminer" = xyes; then AC_DEFINE([USE_MODMINER], [1], [Defined to 1 if ModMiner support is wanted]) need_dynclock=yes need_lowl_vcom=yes need_binloader=yes has_fpga=yes have_udevrules=true fi AM_CONDITIONAL([HAS_MODMINER], [test x$modminer = xyes]) PKG_PROG_PKG_CONFIG() optlist="$optlist libusb" libusb=no libusb_include_path="" AC_ARG_WITH([libusb], [AC_HELP_STRING([--without-libusb],[Compile using libusb (default enabled)])], [want_libusb=$withval], [want_libusb=yes] ) if test "x$want_libusb" = "xyes"; then PKG_CHECK_MODULES([LIBUSB], [libusb-1.0],[ libusb=yes ],[ for usb_lib in usb-1.0 usb; do AC_CHECK_LIB($usb_lib, libusb_init, [ libusb=yes break ]) done if test "x$libusb" = xyes; then AC_CHECK_DECL([libusb_init],[ LIBUSB_LIBS="-l$usb_lib" ],[ AC_MSG_CHECKING([whether libusb_init is declared in subdirectory]) libusb_include_path=`echo '#include ' | ${CPP} -M - 2>/dev/null | sed -E -e 's/^[^:]+:[[:space:]]*(([^[:space:]]|\\\\[[:space:]])*)libusb\\.h([[:space:]].*|$)$/\\1/' -e 't my' -e d -e ': my' -e 
's/\\\\?([\\\\[:space:]])/\\\\\\1/g'` if test "x$libusb_include_path" != "x"; then LIBUSB_LIBS="-l$usb_lib" LIBUSB_CFLAGS="-I$libusb_include_path" AC_MSG_RESULT([yes]) else libusb=no AC_MSG_RESULT([no]) fi ],[#include ]) fi ]) fi driverlist="$driverlist klondike" AC_ARG_ENABLE([klondike], [AC_HELP_STRING([--disable-klondike],[Compile support for Klondike (default enabled)])], [klondike=$enableval], [klondike=auto] ) if test "x$klondike$want_libusb" = xyesno; then AC_MSG_ERROR([You disabled libusb, required for Klondike support]) elif test "x$klondike$libusb" = xyesno; then AC_MSG_ERROR([Could not find libusb, required for Klondike support]) elif test "x$klondike" = xauto; then klondike="$libusb" if test "x$libusb" = xno; then AC_MSG_WARN([Could not find libusb, required for Klondike support]) klondike_enableaction="install libusb 1.0+" fi fi if test "x$klondike" = xyes; then AC_DEFINE([USE_KLONDIKE], [1], [Defined to 1 if Klondike support is wanted]) need_lowl_usb=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_KLONDIKE], [test x$klondike = xyes]) driverlist="$driverlist x6500" AC_ARG_ENABLE([x6500], [AC_HELP_STRING([--disable-x6500],[Compile support for X6500 (default if libusb)])], [x6500=$enableval], [x6500=auto] ) if test "x$x6500$want_libusb" = xyesno; then AC_MSG_ERROR([You disabled libusb, required for X6500 support]) elif test "x$x6500$libusb" = xyesno; then AC_MSG_ERROR([Could not find libusb, required for X6500 support]) elif test "x$x6500" = xauto; then x6500="$libusb" if test "x$libusb" = xno; then AC_MSG_WARN([Could not find libusb, required for X6500 support]) x6500_enableaction="install libusb 1.0+" fi fi if test "x$x6500" = xyes; then AC_DEFINE([USE_X6500], [1], [Defined to 1 if X6500 support is wanted]) need_dynclock=yes need_lowl_usb=yes need_binloader=yes has_fpga=yes have_udevrules=true fi AM_CONDITIONAL([HAS_X6500], [test x$x6500 = xyes]) driverlist="$driverlist ztex" AC_ARG_ENABLE([ztex], [AC_HELP_STRING([--disable-ztex],[Compile support for ZTEX (default if libusb)])], [ztex=$enableval], [ztex=auto] ) if test "x$ztex$want_libusb" = xyesno; then AC_MSG_ERROR([You disabled libusb, required for ZTEX support]) elif test "x$ztex$libusb" = xyesno; then AC_MSG_ERROR([Could not find libusb, required for ZTEX support]) elif test "x$ztex" = xauto; then ztex="$libusb" if test "x$libusb" = xno; then AC_MSG_WARN([Could not find libusb, required for ZTEX support]) ztex_enableaction="install libusb 1.0+" fi fi if test "x$ztex" = xyes; then AC_DEFINE([USE_ZTEX], [1], [Defined to 1 if ZTEX support is wanted]) need_dynclock=yes need_lowl_usb=yes need_binloader=yes has_fpga=yes have_udevrules=true fi AM_CONDITIONAL([HAS_ZTEX], [test x$ztex = xyes]) driverlist="$driverlist bifury" AC_ARG_ENABLE([bifury], [AC_HELP_STRING([--disable-bifury],[Compile support for Bi*Fury (default enabled)])], [bifury=$enableval], [bifury=yes] ) if test "x$bifury" = "xyes"; then AC_DEFINE([USE_BIFURY], [1], [Defined to 1 if Bi*Fury support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([USE_BIFURY], [test x$bifury = xyes]) driverlist="$driverlist bitfury_gpio/bitfury" bitfury=yes AC_ARG_ENABLE([bitfury], [AC_HELP_STRING([--disable-bitfury],[Compile support for Bitfury (default enabled)])], [bitfury=$enableval] ) if test "x$bitfury" = xyes; then AC_DEFINE([USE_BITFURY], [1], [Defined to 1 if Bitfury support is wanted]) fi AM_CONDITIONAL([HAS_BITFURY], [test x$bitfury = xyes]) driverlist="$driverlist bfsb" bfsb=no AC_ARG_ENABLE([bfsb], 
[AC_HELP_STRING([--enable-bfsb],[Compile support for BFSB (default disabled)])], [bfsb=$enableval] ) if test "x$bfsb" = "xyes"; then if test "x$bitfury" = "xno"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled BFSB]) fi AC_DEFINE([USE_BFSB], [1], [Defined to 1 if BFSB support is wanted]) fi AM_CONDITIONAL([HAS_BFSB], [test x$bfsb = xyes]) driverlist="$driverlist bigpic" bigpic=auto AC_ARG_ENABLE([bigpic], [AC_HELP_STRING([--disable-bigpic],[Compile support for Big Picture Mining USB (default enabled)])], [bigpic=$enableval] ) if test "x$bigpic" = "xno"; then true elif test "x$bitfury" = "xyes"; then bigpic=yes elif test "x$bigpic" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled BigPic]) else bigpic=no fi if test "x$bigpic" = "xyes"; then AC_DEFINE([USE_BIGPIC], [1], [Defined to 1 if Big Picture Mining USB support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_BIGPIC], [test x$bigpic = xyes]) driverlist="$driverlist drillbit" AC_ARG_ENABLE([drillbit], [AC_HELP_STRING([--disable-drillbit],[Compile support for DrillBit (default enabled)])], [drillbit=$enableval], [drillbit=auto] ) if test "x$drillbit" = "xno"; then true elif test "x$bitfury" = "xyes"; then drillbit=yes elif test "x$drillbit" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled DrillBit]) else drillbit=no fi if test "x$drillbit" = "xyes"; then AC_DEFINE([USE_DRILLBIT], [1], [Defined to 1 if DrillBit support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([USE_DRILLBIT], [test x$drillbit = xyes]) driverlist="$driverlist twinfury" twinfury=auto AC_ARG_ENABLE([twinfury], [AC_HELP_STRING([--disable-twinfury],[Compile support for Twinfury USB miner (default enabled)])], [twinfury=$enableval] ) if test "x$twinfury" = "xno"; then true elif test "x$bitfury" = "xyes"; then twinfury=yes elif test "x$twinfury" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled Twinfury]) else twinfury=no fi if test "x$twinfury" = "xyes"; then AC_DEFINE([USE_TWINFURY], [1], [Defined to 1 if Twinfury USB miner support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_TWINFURY], [test x$twinfury = xyes]) driverlist="$driverlist littlefury" littlefury=auto AC_ARG_ENABLE([littlefury], [AC_HELP_STRING([--disable-littlefury],[Compile support for LittleFury (default enabled)])], [littlefury=$enableval] ) if test "x$littlefury" = "xno"; then true elif test "x$bitfury" = "xyes"; then littlefury=yes elif test "x$littlefury" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled LittleFury]) else littlefury=no fi if test "x$littlefury" = "xyes"; then AC_DEFINE([USE_LITTLEFURY], [1], [Defined to 1 if LittleFury support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_LITTLEFURY], [test x$littlefury = xyes]) found_hidapi=false for _hidapi_lib in hidapi hidapi-hidraw hidapi-libusb; do PKG_CHECK_MODULES([hidapi],[$_hidapi_lib],[ found_hidapi=true break ],[ true ]) done driverlist="$driverlist nanofury" nanofury=auto AC_ARG_ENABLE([nanofury], [AC_HELP_STRING([--disable-nanofury],[Compile support for NanoFury (default enabled)])], [nanofury=$enableval] ) if test "x$nanofury" = "xno"; then true elif test "x$bitfury" = "xyes"; then if test x$found_hidapi = xtrue; then nanofury=yes else if test x$nanofury = xauto; then nanofury=no nanofury_enableaction="install hidapi" else 
AC_MSG_ERROR([Could not find hidapi, required for NanoFury support]) fi fi elif test "x$nanofury" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled NanoFury]) else nanofury=no fi if test "x$nanofury" = "xyes"; then AC_DEFINE([USE_NANOFURY], [1], [Defined to 1 if NanoFury support is wanted]) need_lowl_hid=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([HAS_NANOFURY], [test x$nanofury = xyes]) driverlist="$driverlist hashbuster" hashbuster=auto AC_ARG_ENABLE([hashbuster], [AC_HELP_STRING([--disable-hashbuster],[Compile support for HashBuster Nano (default enabled)])], [hashbuster=$enableval] ) if test "x$hashbuster" = "xno"; then true elif test "x$bitfury" = "xyes"; then if test x$found_hidapi = xtrue; then hashbuster=yes else if test x$hashbuster = xauto; then hashbuster=no hashbuster_enableaction="install hidapi" else AC_MSG_ERROR([Could not find hidapi, required for HashBuster Nano support]) fi fi elif test "x$hashbuster" = "xyes"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled HashBuster Nano]) else hashbuster=no fi if test "x$hashbuster" = "xyes"; then AC_DEFINE([USE_HASHBUSTER], [1], [Defined to 1 if HashBuster Nano support is wanted]) need_lowl_hid=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([USE_HASHBUSTER], [test x$hashbuster = xyes]) driverlist="$driverlist hashbusterusb" AC_ARG_ENABLE([hashbusterusb], [AC_HELP_STRING([--disable-hashbusterusb],[Compile support for HashBuster Micro (default if libusb)])], [hashbusterusb=$enableval], [hashbusterusb=auto] ) if test "x$hashbusterusb$want_libusb" = xyesno; then AC_MSG_ERROR([You disabled libusb, required for HashBuster Micro support]) elif test "x$hashbusterusb$libusb" = xyesno; then AC_MSG_ERROR([Could not find libusb, required for HashBuster Micro support]) elif test "x$hashbusterusb" = xauto; then hashbusterusb="$libusb" if test "x$libusb" = xno; then AC_MSG_WARN([Could not find libusb, required for HashBuster Micro support]) hashbusterusb_enableaction="install libusb 1.0+" fi fi if test "x$hashbusterusb" = xyes; then AC_DEFINE([USE_HASHBUSTERUSB], [1], [Defined to 1 if HashBuster Micro support is wanted]) need_lowl_usb=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([USE_HASHBUSTERUSB], [test x$hashbusterusb = xyes]) driverlist="$driverlist hashfast" hashfast=yes AC_ARG_ENABLE([hashfast], [AC_HELP_STRING([--disable-hashfast],[Compile support for HashFast (default enabled)])], [hashfast=$enableval] ) if test "x$hashfast" = xyes; then AC_DEFINE([USE_HASHFAST], [1], [Defined to 1 if HashFast support is wanted]) need_lowl_vcom=yes has_asic=yes have_udevrules=true fi AM_CONDITIONAL([USE_HASHFAST], [test x$hashfast = xyes]) driverlist="$driverlist metabank" metabank=no AC_ARG_ENABLE([metabank], [AC_HELP_STRING([--enable-metabank],[Compile support for Metabank (default disabled)])], [metabank=$enableval] ) if test "x$metabank" = "xyes"; then if test "x$bitfury" = "xno"; then AC_MSG_ERROR([You explicitly disabled Bitfury and explicitly enabled Metabank]) fi AC_DEFINE([USE_METABANK], [1], [Defined to 1 if Metabank support is wanted]) fi AM_CONDITIONAL([HAS_METABANK], [test x$metabank = xyes]) if test "x$need_lowl_vcom" != "xno"; then # Lowlevel VCOM doesn't need libusb, but it can take advantage of it to reattach drivers need_lowl_usb=yes fi if test "x$need_lowl_usb" = "xno"; then libusb=no LIBUSB_LIBS='' LIBUSB_CFLAGS='' fi if test "x$libusb" = xyes; then AC_DEFINE([HAVE_LIBUSB], [1], [Define if you have libusb-1.0]) save_CFLAGS="$CFLAGS" 
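dnl The AC_CHECK_DECLS just below tests whether this libusb-1.0 declares
dnl libusb_error_name(); older releases of the library lack it.  As a rough,
dnl hypothetical usage sketch (header path depends on LIBUSB_CFLAGS):
dnl   #include <stdio.h>
dnl   #include <libusb.h>   /* may be <libusb-1.0/libusb.h> on some setups */
dnl   int rc = libusb_init(NULL);
dnl   if (rc < 0)
dnl       fprintf(stderr, "libusb: %s\n", libusb_error_name(rc));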
CFLAGS="$LIBUSB_CFLAGS $CFLAGS" AC_CHECK_DECLS([libusb_error_name],[true],[true],[#include ]) CFLAGS="$save_CFLAGS" fi algolist="$algolist scrypt" scrypt="no" AC_ARG_ENABLE([scrypt], [AC_HELP_STRING([--enable-scrypt],[Compile support for scrypt mining (default disabled)])], [scrypt=$enableval] ) if test "x$scrypt" = xyes; then AC_DEFINE([USE_SCRYPT], [1], [Defined to 1 if scrypt support is wanted]) fi if test x$need_lowl_vcom = xyes; then AC_DEFINE([NEED_BFG_LOWL_VCOM], [1], [Defined to 1 if lowlevel VCOM drivers are being used]) need_lowlevel=yes if $have_win32; then echo '#include ' >iospeeds_local.h found_ddkusb=false AC_CHECK_HEADER([usbiodef.h],[ found_ddkusb=true ],[ AC_CHECK_HEADER([ddk/usbiodef.h],[ found_ddkusb=true AUTOSCAN_CPPFLAGS="-I"`echo '#include ' | ${CPP} -M - 2>/dev/null | sed -E -e 's/^[^:]+:[[:space:]]*(([^[:space:]]|\\\\[[:space:]])*)usbiodef\\.h([[:space:]].*|$)$/\\1/' -e 't my' -e d -e ': my' -e 's/\\\\?([\\\\[:space:]])/\\\\\\1/g'` ],[ true ],[ #include #include AC_INCLUDES_DEFAULT ]) ],[ #include #include AC_INCLUDES_DEFAULT ]) if $found_ddkusb; then AUTOSCAN_LIBS="-lsetupapi" AC_DEFINE([HAVE_WIN_DDKUSB],[1],[Defined to 1 if Windows DDK USB headers are being used]) fi else AC_MSG_CHECKING([what baud rates your system supports]) echo '#include ' | ${CPP} -dM - 2>/dev/null | sed 's/.*[ ]B\([0-9][0-9]*\)[ ].*/IOSPEED(\1)/' | grep IOSPEED >iospeeds_local.h if grep -q IOSPEED iospeeds_local.h; then AC_MSG_RESULT([done]) else AC_MSG_RESULT([failed, using standard POSIX]) echo '#include ' >iospeeds_local.h fi fi fi if test "x$opencl$need_lowl_hid" = xnono; then DLOPEN_FLAGS="" fi if test x$need_lowl_hid = xyes; then AC_DEFINE([NEED_BFG_LOWL_HID], [1], [Defined to 1 if lowlevel hid drivers are being used]) need_lowlevel=yes fi if test x$need_lowl_usb = xyes; then need_lowlevel=yes fi if test x$need_lowl_vcom = xyes; then need_lowlevel=yes fi if test x$need_lowlevel = xyes; then AC_DEFINE([HAVE_BFG_LOWLEVEL], [1], [Defined to 1 if lowlevel drivers are being used]) fi curses="auto" AC_ARG_WITH([curses], [AC_HELP_STRING([--without-curses],[Compile support for curses TUI (default enabled)])], [curses=$withval] ) if test "x$curses" = "xno"; then optlist="$optlist curses" else curses_enableaction="install a curses library" orig_libs="$LIBS" if test "x${curses}" = "xyes"; then preferl='' else preferl="${curses} ${curses}6 ${curses}5" fi for wideornot in w u ''; do for ncursesver in '' 6 5; do preferl="${preferl} ncurses${wideornot}${ncursesver}" done preferl="${preferl} pdcurses${wideornot}" done if test "x$cross_compiling" != "xyes"; then AC_MSG_CHECKING([for best native curses library]) orig_cflags="$CFLAGS" for curses_lib in ${preferl}; do if ! 
${curses_lib}-config --cflags >/dev/null 2>/dev/null; then continue fi CFLAGS="$orig_cflags $(${curses_lib}-config --cflags)" LIBS="$orig_libs $(${curses_lib}-config --libs)" AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]], [[ WINDOW *w = NULL; mvwprintw(w, 2, 2, "Testing %s", "o hai"); ]])], [ curses=yes optlist="$optlist ${curses_lib}/curses" AC_MSG_RESULT([$curses_lib]) NCURSES_LIBS=`${curses_lib}-config --libs` NCURSES_CPPFLAGS=`${curses_lib}-config --cflags` break ], [ AC_MSG_WARN([${curses_lib} doesn't seem to be installed properly]) ]) done CFLAGS="$orig_cflags" if test "x$curses" != "xyes"; then AC_MSG_RESULT([none?]) fi fi if test "x$curses" != "xyes"; then sym=addstr AC_SEARCH_LIBS(${sym}, ${preferl}, [ curses=yes eval "curseslib=\"\${ac_cv_search_${sym}}\"" barelib="${curseslib/-l/}" optlist="$optlist ${barelib}/curses" if test "x${curseslib}" != "xnone required"; then NCURSES_LIBS="${curseslib}" fi # Need to check for headers in subdirectories, to ensure we get wide stuff AC_MSG_CHECKING([for curses header subdirectory]) barelib="${barelib/6/}" barelib="${barelib/5/}" cursesincl=`echo '#include <'"${barelib}"'/curses.h>' | ${CPP} -M - 2>/dev/null | sed -E -e 's/^[^:]+:[[:space:]]*(([^[:space:]]|\\\\[[:space:]])*)curses\\.h([[:space:]].*|$)$/\\1/' -e 't my' -e d -e ': my' -e 's/\\\\?([\\\\[:space:]])/\\\\\\1/g'` if test "x$cursesincl" != "x"; then NCURSES_CPPFLAGS="-I${cursesincl}" AC_MSG_RESULT([$cursesincl]) else AC_MSG_RESULT([none found]) fi break ], [ if test "x$curses" = "xyes"; then AC_MSG_ERROR([Could not find curses library - please install libncurses-dev or pdcurses-dev (or configure --without-curses)]) else AC_MSG_WARN([Could not find curses library - if you want a TUI, install libncurses-dev or pdcurses-dev]) curses=no optlist="$optlist curses" fi ]) fi if test "x$curses" = "xyes"; then AC_DEFINE([HAVE_CURSES], [1], [Defined to 1 if curses TUI support is wanted]) AC_MSG_CHECKING([whether curses library supports wide characters]) LIBS="$orig_libs $NCURSES_CPPFLAGS $NCURSES_LIBS" AC_LINK_IFELSE([ AC_LANG_PROGRAM([ #define PDC_WIDE #include ],[ addwstr(L"test"); add_wch(WACS_VLINE); ]) ],[ AC_MSG_RESULT([yes]) AC_DEFINE([USE_UNICODE],[1],[Defined to 1 if curses supports wide characters]) ],[ AC_MSG_RESULT([no]) ]) fi LIBS="$orig_libs" fi maybe_ldconfig= AC_ARG_WITH([system-libblkmaker], [AC_HELP_STRING([--with-system-libblkmaker], [Use system libblkmaker rather than bundled one (default disabled)])],[true],[with_system_libblkmaker=no]) if test "x$with_system_libblkmaker" = "xyes"; then PKG_CHECK_MODULES([libblkmaker],[libblkmaker_jansson-0.1],[ true ],[ AC_MSG_ERROR([Could not find system libblkmaker]) ]) else save_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS -Wl,-zorigin" origin_LDFLAGS= AC_MSG_CHECKING([whether the linker recognizes the -zorigin option]) AC_TRY_LINK([],[],[ AC_MSG_RESULT([yes]) origin_LDFLAGS=',-zorigin' ],[ AC_MSG_RESULT([no]) ]) LDFLAGS="$save_LDFLAGS" libblkmaker_CFLAGS='-Ilibblkmaker' libblkmaker_LDFLAGS='-Llibblkmaker/.libs -Wl,-rpath,\$$ORIGIN/libblkmaker/.libs'"$origin_LDFLAGS" libblkmaker_LIBS='-lblkmaker_jansson-0.1 -lblkmaker-0.1' AC_CONFIG_SUBDIRS([libblkmaker]) _ROOTPATH=$PATH$PATH_SEPARATOR`echo $PATH | sed s/bin/sbin/g` possible_ldconfigs="${target}-ldconfig" if test "x$cross_compiling" != "xyes"; then possible_ldconfigs="${possible_ldconfigs} ldconfig" fi AC_CHECK_PROGS([LDCONFIG],[${possible_ldconfigs}],[],[$_ROOTPATH]) if test "x$LDCONFIG" != "x"; then maybe_ldconfig=" && $LDCONFIG" fi fi AC_SUBST(libblkmaker_CFLAGS) 
AC_SUBST(libblkmaker_LDFLAGS) AC_SUBST(libblkmaker_LIBS) $have_udevrules || use_udevrules=false if $use_udevrules; then AC_ARG_WITH([udevrulesdir], AS_HELP_STRING([--with-udevrulesdir=DIR], [Install udev rules into this directory]), [], [ if test -d /lib/udev/rules.d; then with_udevrulesdir=/lib/udev/rules.d else with_udevrulesdir=no fi ] ) if test "x$with_udevrulesdir" = "xno"; then use_udevrules=false fi if $use_udevrules; then AC_SUBST([udevrulesdir], [$with_udevrulesdir]) fi fi AM_CONDITIONAL([NEED_LIBBLKMAKER], [test x$with_system_libblkmaker != xyes]) AM_CONDITIONAL([NEED_BFG_BINLOADER], [test x$need_binloader = xyes]) AM_CONDITIONAL([NEED_DYNCLOCK], [test x$need_dynclock = xyes]) AM_CONDITIONAL([NEED_BFG_LOWL_VCOM], [test x$need_lowl_vcom = xyes]) AM_CONDITIONAL([NEED_BFG_LOWL_HID], [test x$need_lowl_hid = xyes]) AM_CONDITIONAL([NEED_BFG_LOWLEVEL], [test x$need_lowlevel = xyes]) AM_CONDITIONAL([HAS_SCRYPT], [test x$scrypt = xyes]) AM_CONDITIONAL([HAVE_CURSES], [test x$curses = xyes]) AM_CONDITIONAL([HAVE_SENSORS], [test x$with_sensors = xyes]) AM_CONDITIONAL([HAVE_CYGWIN], [test x$have_cygwin = xtrue]) AM_CONDITIONAL([HAVE_LIBUSB], [test x$libusb = xyes]) AM_CONDITIONAL([HAVE_WINDOWS], [test x$have_win32 = xtrue]) AM_CONDITIONAL([HAVE_x86_64], [test x$have_x86_64 = xtrue]) AM_CONDITIONAL([HAVE_WIN_DDKUSB], [test x$found_ddkusb = xtrue]) AM_CONDITIONAL([HAS_FPGA], [test x$has_fpga != xno]) AM_CONDITIONAL([HAS_ASIC], [test x$has_asic != xno]) AM_CONDITIONAL([USE_UDEVRULES], [$use_udevrules]) dnl Find YASM has_yasm=false if test "x$have_x86_32$have_x86_64" != "xfalsefalse"; then AC_PATH_PROG([YASM],[yasm],[false]) if test "x$YASM" != "xfalse" ; then has_yasm_enableaction="install yasm 1.0.1+" AC_MSG_CHECKING([if yasm version is greater than 1.0.1]) yasmver=`"$YASM" --version | head -1 | cut -d\ -f2` yamajor=`echo $yasmver | cut -d. -f1` yaminor=`echo $yasmver | cut -d. -f2` yamini=`echo $yasmver | cut -d. -f3` if test "$yamajor" -ge "1" ; then if test "$yamajor" -eq "1" ; then if test "$yaminor" -ge "0" ; then if test "$yaminor" -eq "0"; then if test "$yamini" -ge "1"; then has_yasm=true fi else has_yasm=true fi fi fi else has_yasm=false fi if test "x$has_yasm" = "xtrue" ; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi fi if test "x$has_yasm" = "xfalse" ; then AC_MSG_NOTICE([yasm is required for the assembly algorithms. 
They will be skipped.]) else AC_DEFINE([HAVE_YASM], [1], [Defined to 1 if yasm is being used]) if test "x$have_win32$have_cygwin" != "xfalsefalse"; then if test "x$have_x86_64" = xtrue; then YASM_FMT="win64" else YASM_FMT="coff" fi elif test "x$have_macho" = "xtrue"; then YASM_FMT="macho$bitness" else YASM_FMT="elf$bitness" fi fi fi AM_CONDITIONAL([HAS_YASM], [test x$has_yasm = xtrue]) have_sse2=no if test "x$cpumining$have_x86_32" = "xyestrue"; then AC_MSG_CHECKING([if SSE2 code compiles]) save_CFLAGS="$CFLAGS" for flags in '' '-msse2'; do CFLAGS="$CFLAGS $flags" AC_TRY_LINK([ #include ],[ int *i = (int *)0xdeadbeef; __m128i a, b; a = _mm_set1_epi32(i[0]); b = _mm_set_epi32(i[0], i[1], i[2], i[3]); a = _mm_add_epi32(a, b); a = _mm_andnot_si128(a, b); a = _mm_or_si128(a, b); a = _mm_slli_epi32(a, i[4]); a = _mm_and_si128(a, b); a = _mm_xor_si128(a, b); ],[ if test "x$flags" = "x"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([with $flags]) fi SSE2_CFLAGS="$flags" have_sse2=yes break ],[ true ]) done CFLAGS="${save_CFLAGS}" if test "x$have_sse2" = "xyes"; then AC_DEFINE([HAVE_SSE2], [1], [Defined to 1 if yasm is being used]) else AC_MSG_RESULT([no]) fi fi AM_CONDITIONAL([HAVE_SSE2], [test "x$have_sse2" = "xyes"]) if test "x$need_lowl_vcom" = "xyes"; then AC_ARG_WITH([libudev], [AC_HELP_STRING([--without-libudev], [Autodetect FPGAs using libudev (default enabled)])], [libudev=$withval], [libudev=auto] ) if test "x$libudev" != "xno"; then AC_CHECK_HEADER([libudev.h],[ libudev=yes UDEV_LIBS=-ludev AC_DEFINE([HAVE_LIBUDEV], [1], [Defined to 1 if libudev is wanted]) ], [ if test "x$libudev" = "xyes"; then AC_MSG_ERROR([libudev not found]) fi libudev=no ]) fi fi AM_CONDITIONAL([HAVE_LIBUDEV], [test x$libudev != xno]) AC_SUBST(LIBUSB_LIBS) AC_SUBST(LIBUSB_CFLAGS) PKG_CHECK_MODULES([LIBCURL], [libcurl >= 7.18.2], ,[AC_MSG_ERROR([Missing required libcurl dev >= 7.18.2])]) if echo "$LIBCURL_CFLAGS" | grep '@CPPFLAG_CURL_STATICLIB@' >/dev/null 2>&1; then AC_MSG_WARN([Your libcurl pkgconfig file is broken, applying workaround]) LIBCURL_CFLAGS=`echo "$LIBCURL_CFLAGS" | sed 's/@CPPFLAG_CURL_STATICLIB@//'` fi AC_SUBST(LIBCURL_LIBS) AC_CHECK_FUNCS([setrlimit]) dnl CCAN wants to know a lot of vars. # All the configuration checks. Regrettably, the __attribute__ checks will # give false positives on old GCCs, since they just cause warnings. But that's # fairly harmless. AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((cold)) cleanup(void) { }])], AC_DEFINE([HAVE_ATTRIBUTE_COLD], [1], [Define if __attribute__((cold))])) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((const)) cleanup(void) { }])], AC_DEFINE([HAVE_ATTRIBUTE_CONST], [1], [Define if __attribute__((const))])) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((noreturn)) cleanup(void) { exit(1); }])], [ AC_DEFINE([HAVE_ATTRIBUTE_NORETURN], [1], [Define if __attribute__((noreturn))]) AC_DEFINE_UNQUOTED([NORETURN], [__attribute__((noreturn))], [Syntax of noreturn attribute]) ], [ AC_DEFINE_UNQUOTED([NORETURN], []) ] ) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((format(__printf__, 1, 2))) cleanup(const char *fmt, ...) 
{ }])], AC_DEFINE([HAVE_ATTRIBUTE_PRINTF], [1], [Define if __attribute__((format(__printf__)))])) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((unused)) cleanup(void) { }])], AC_DEFINE([HAVE_ATTRIBUTE_UNUSED], [1], [Define if __attribute__((unused))])) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static void __attribute__((used)) cleanup(void) { }])], AC_DEFINE([HAVE_ATTRIBUTE_USED], [1], [Define if __attribute__((used))])) AC_LINK_IFELSE([AC_LANG_SOURCE([int main(void) { return __builtin_constant_p(1) ? 0 : 1; }])], AC_DEFINE([HAVE_BUILTIN_CONSTANT_P], [1], [Define if have __builtin_constant_p])) AC_LINK_IFELSE([AC_LANG_SOURCE([int main(void) { return __builtin_types_compatible_p(char *, int) ? 1 : 0; }])], AC_DEFINE([HAVE_BUILTIN_TYPES_COMPATIBLE_P], [1], [Define if have __builtin_types_compatible_p])) AC_COMPILE_IFELSE([AC_LANG_SOURCE([static int __attribute__((warn_unused_result)) func(int x) { return x; }])], AC_DEFINE([HAVE_WARN_UNUSED_RESULT], [1], [Define if __attribute__((warn_unused_result))])) # byteswap functions AH_TEMPLATE([HAVE_BYTESWAP_H], [Define to use byteswap macros from byteswap.h]) AH_TEMPLATE([HAVE_ENDIAN_H], [Define to use byteswap macros from endian.h]) AH_TEMPLATE([HAVE_SYS_ENDIAN_H], [Define to use byteswap macros from sys/endian.h]) AH_TEMPLATE([HAVE_LIBKERN_OSBYTEORDER_H], [Define to use byteswap macros from libkern/OSByteOrder.h]) BSWAP='' for sym in bswap_ __builtin_bswap __bswap_ __swap swap OSSwapInt; do AC_MSG_CHECKING([for ${sym}* functions]) for headerfile in '' byteswap.h endian.h sys/endian.h libkern/OSByteOrder.h; do BFG_INCLUDE([headerinclude], [${headerfile}]) AC_LINK_IFELSE([ AC_LANG_PROGRAM([ ${headerinclude} ], [ (void) ${sym}16(0); (void) ${sym}32(0); (void) ${sym}64(0); ]) ], [ BSWAP="${sym}" if test "x${headerfile}" = "x"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([found in ${headerfile}]) AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE_$headerfile]), 1) fi break 2 ]) done AC_MSG_RESULT([no]) done if test "x$BSWAP" = "x"; then true # Substitutes are provided in miner.h elif test "x$BSWAP" = "xbswap_"; then AC_MSG_CHECKING([if bswap_16 is already a macro]) BFG_PREPROC_IFELSE([defined(bswap_16)], $headerfile, [ AC_MSG_RESULT([yes]) BSWAP="" ],[ AC_MSG_RESULT([no]) ]) fi if test "x$BSWAP" != "x"; then AC_DEFINE_UNQUOTED([bswap_16], ${BSWAP}16, [Define to 16-bit byteswap macro]) AC_DEFINE_UNQUOTED([bswap_32], ${BSWAP}32, [Define to 16-bit byteswap macro]) AC_DEFINE_UNQUOTED([bswap_64], ${BSWAP}64, [Define to 16-bit byteswap macro]) fi # endian definition macros AC_MSG_CHECKING([for platform endian]) found_endian=no for headerfile in '' endian.h sys/endian.h sys/param.h; do for pfx in '' '__'; do BFG_PREPROC_IFELSE([defined(${pfx}BYTE_ORDER) && defined(${pfx}BIG_ENDIAN) && defined(${pfx}LITTLE_ENDIAN) && (${pfx}BYTE_ORDER == ${pfx}BIG_ENDIAN || ${pfx}BYTE_ORDER == ${pfx}LITTLE_ENDIAN)], ${headerfile}, [ if test "x$headerfile" = "x"; then headerfilec='' else headerfilec=" (${headerfile})" fi BFG_PREPROC_IFELSE([${pfx}BYTE_ORDER == ${pfx}BIG_ENDIAN], ${headerfile}, [ AC_MSG_RESULT([big endian${headerfilec}]) AC_DEFINE(WORDS_BIGENDIAN, 1, [Define if your platform is big endian]) ], [ AC_MSG_RESULT([little endian${headerfilec}]) ]) found_endian=yes break 2 ],[true]) done done if test "x$found_endian" = "xno"; then if $have_win32 || $have_cygwin; then AC_MSG_RESULT([assuming little endian (Windows)]) else # AC_C_BIGENDIAN is reported to have problems, and invasive even if buried in a conditional, so don't use it AC_MSG_RESULT([unknown]) AC_MSG_ERROR([Unable 
to identify platform endian]) fi fi AC_MSG_CHECKING([if GNU format attribute compiles]) AC_TRY_COMPILE([ #define FORMAT_SYNTAX_CHECK(...) __attribute__(( format(__VA_ARGS__) )) int myfunc(char *fmt, ...) FORMAT_SYNTAX_CHECK(printf, 1, 2); int myfunc(char *fmt, ...) { return 42; } ], [ myfunc("abc%d", 42); ], [ AC_MSG_RESULT([yes]) AC_DEFINE_UNQUOTED([FORMAT_SYNTAX_CHECK(...)], [__attribute__(( format(__VA_ARGS__) ))], [Syntax of format-checking attribute]) ], [ AC_MSG_RESULT([no]) AC_DEFINE_UNQUOTED([FORMAT_SYNTAX_CHECK(...)], []) ]) AC_MSG_CHECKING([for clock_gettime(CLOCK_MONOTONIC)]) AC_TRY_COMPILE([ #define _GNU_SOURCE #include ],[ struct timespec ts; clock_gettime(CLOCK_MONOTONIC, &ts); ],[ AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_CLOCK_GETTIME_MONOTONIC], [1], [Defined to 1 if clock_gettime(CLOCK_MONOTONIC) is defined]) save_LIBS="${LIBS}" AC_SEARCH_LIBS([clock_gettime],[rt posix4],[ if test "x${ac_cv_search_clock_gettime}" != "xnone required"; then RT_LIBS="${ac_cv_search_clock_gettime}" fi ]) LIBS="${save_LIBS}" AC_CHECK_FUNCS([clock_nanosleep]) AC_MSG_CHECKING([for clock_gettime(CLOCK_MONOTONIC_RAW)]) AC_TRY_COMPILE([ #define _GNU_SOURCE #include ],[ struct timespec ts; clock_gettime(CLOCK_MONOTONIC_RAW, &ts); ],[ AC_MSG_RESULT([yes]) AC_DEFINE([HAVE_CLOCK_GETTIME_MONOTONIC_RAW], [1], [Defined to 1 if clock_gettime(CLOCK_MONOTONIC_RAW) is defined]) ],[ AC_MSG_RESULT([no]) ]) ],[ AC_MSG_RESULT([no]) ]) if test "x$prefix" = xNONE; then prefix=/usr/local fi AM_CONDITIONAL([NEED_BITSTREAM_FPGAMINER], [test x$modminer$x6500 != xnono]) AC_DEFINE_UNQUOTED([PHATK_KERNNAME], ["phatk121016"], [Filename for phatk kernel]) AC_DEFINE_UNQUOTED([POCLBM_KERNNAME], ["poclbm130302"], [Filename for poclbm kernel]) AC_DEFINE_UNQUOTED([DIAKGCN_KERNNAME], ["diakgcn121016"], [Filename for diakgcn kernel]) AC_DEFINE_UNQUOTED([DIABLO_KERNNAME], ["diablo130302"], [Filename for diablo kernel]) AC_DEFINE_UNQUOTED([SCRYPT_KERNNAME], ["scrypt130511"], [Filename for scrypt kernel]) m4_define([BFG_PRINT_LIST],[ eval _mylist="\$$2" _yeslist= _nolist= _enableactions= for _opt in $_mylist; do IFS=/ read _opt _var < Wed, 15 Jan 2014 20:16:48 -0000 bfgminer (3.9.0-0precise1) precise; urgency=low * hashbusterusb: Voltage/VRM controls, and support for identify function (5 second LED colour change). * nanofury: Support for identify function by turning LED off for 5 seconds. * twinfury: Support for voltage information/control. * Linux: New udev rules file to automatically put supported (and autodetectable) mining devices in the "video" UNIX group. -- Luke Dashjr Wed, 25 Dec 2013 00:25:57 -0000 bfgminer (3.8.1-0precise1) precise; urgency=low * Bug fixes only. -- Luke Dashjr Mon, 09 Dec 2013 03:19:55 -0000 bfgminer (3.8.0-0precise1) precise; urgency=low * New driver for HashBuster Micro boards. * klondike: Add support for boards built with 55nm Avalon2 chips. -- Luke Dashjr Mon, 02 Dec 2013 01:35:48 -0000 bfgminer (3.7.0-0precise1) precise; urgency=low * Support for new Bi*fury and Twinfury mining devices. * Device probing has been rewritten to scan all devices in parallel (much faster). As a result, the order of devices found is no longer deterministic (consistent) at all. * Parallel device probing makes --device's former syntax unusable, so it has been modified to use the same pattern matching as --set-device. Note that things like --scan-serial ztex:all are now valid, but probably much more dangerous than with VCOM drivers; a bare driverless "all" only affects VCOM drivers for this reason. 
* Pattern matching for --set-device (and now --device) supports identifying specific devices by serial number or path. To use this, insert either of these between @ symbols where you would have previously put an index. So for example, --device 'BFL@FTWOTOP4' (bitforce device with serial FTWOTOP4) or --set-device '@/dev/ttyUSB0@a:osc6_bits=52' (first processor of device on /dev/ttyUSB0). * BFGMiner-written config files now include the scan-serial and set-device settings. * cpu and opencl drivers have been disabled by default in all cases. Use -S cpu:auto or -S opencl:auto if you need them. * Automatically workaround cgminer driver breakage on Linux. -- Luke Dashjr Wed, 27 Nov 2013 13:46:42 -0000 bfgminer (3.6.0-1precise1) precise; urgency=medium * Explicitly added OpenCL to Debian build rules. -- graeme Thu, 21 Nov 2013 10:19:10 -0800 bfgminer (3.6.0-0precise1) precise; urgency=low * klondike: New driver, just imported from cgminer mostly as-is for now. * opencl: Support will remain, but it is not compiled by default. Use --enable-opencl to build it. -- Luke Dashjr Tue, 12 Nov 2013 07:55:01 -0000 bfgminer (3.5.2-0precise1) precise; urgency=low * Bug fixes only. -- Luke Dashjr Tue, 12 Nov 2013 07:59:00 -0000 bfgminer (3.5.1-0precise1) precise; urgency=low * Bug fixes only. -- Luke Dashjr Thu, 07 Nov 2013 03:42:48 -0000 bfgminer (3.5.0-0precise1) precise; urgency=low * hashbuster: New driver for HasbBuster nano ASIC miners. * littlefury: Updated driver to use latest bitfury code. * Autodetection support for Windows extended to all VCOM-based devices (instead of just FTDI, libusb, and hidapi); this includes at least ModMiner, Big Picture Mining (BF1, RedFury, BlueFury), and USB Block Erupters. * Reworked autodetection to scan system devices once and share results with all drivers. -- Luke Dashjr Wed, 06 Nov 2013 05:52:28 -0000 bfgminer (3.4.0-0precise1) precise; urgency=low * New drivers for KnC miners and NanoFury USB miners. * Per-chip dynamic osc6_bits detection for BFSB, MegaBigPower, and Metabank bitfury boards. * Ability to set default options for various devices using --set-device commandline option; for example, if you have all your NanoFury miners in a USB 3 hub (with 900 mA power available), you can increase the oscillator speed with: --set-device NFY:osc6_bits=53 (or "set-device":["NFY:osc6_bits=53"] in a config file). * Con Kolivas's completely rewritten load-balance strategy with per pool quota support. See the updated README to see how it works. * If we switch away from a pool in failover mode, we will now only switch back to it if it's up for at least 5 minutes to avoid reconnecting to pools that are only intermittently up - good for DDoS situations which we've seen a lot of lately. (by Con Kolivas) * Updated miner.php from Kano. * Proxy settings now default to SOCKS5 instead of SOCKS4. Prefix your proxy IP with "socks4://" if you need to use the older protocol. -- Luke Dashjr Sat, 26 Oct 2013 19:53:59 -0000 bfgminer (3.3.0-0precise1) precise; urgency=low * New drivers for bitfury-based devices: BFSB/MegaBigPower(v2 only!) kits, Big Picture Mining USB (BF1, RedFury, BlueFury, etc), LittleFury, and Metabank. See README.ASIC for setup instructions. * Support for proxy virtual devices has been extended to include the stratum protocol when the upstream pool selected is also stratum and supplies sufficient extranonce2 space. If the upstream pool does not meet this criteria, stratum clients will be disconnected and new ones will fail to subscribe. 
You can take advantage of this to failover to the getwork proxy. Support for upstream getwork pools is impossble, but GBT is planned. * opencl: Driver is disabled by default if FPGAs or ASICs are detected. To enable, use -S opencl:auto * Third hashrate displayed is now based on nonces found, adjusted for pool rejected/stale shares. It should still be approximately equivalent to your effective/earning hashrate, but with better accuracy. * Utility on the TUI status line has been replaced with expected Income, measured as 100% PPS BTC per hour for easy comparison with electric costs. * RPC: Methods shared with cgminer now only consider entire devices, to be more compatible. A set of new processor-detail methods have been added to get full information per-processor. * RPC: Old devdetail method has been made compatible with cgminer devdetails and renamed. * New options --chroot-dir and --setuid on POSIX systems (thanks Ricardo!). -- Luke Dashjr Fri, 11 Oct 2013 20:32:15 -0000 bfgminer (3.2.1-0precise1) precise; urgency=low * Allow startup with no devices, even if RPC and HTTP server are disabled, so long as the user can add new ones via the curses TUI. -- Luke Dashjr Thu, 19 Sep 2013 04:37:01 -0000 bfgminer (3.2.0-0precise1) precise; urgency=low * modminer, x6500 & ztex: Completely removed bitstreams from BFGMiner distribution, and replaced with instructions on where to find them officially. * New embedded getwork server to drive Block Erupter Blades and similar devices off BFGMiner. Instructions in README.ASIC. * Fixed stratum pool-switch crash regressed in BFGMiner 3.1.0. * Added stratum support for --request-diff by attempting to send a mining.suggest_target notification to pools when connecting. * erupter & icarus: Various optimizations to catch racing nonces, and otherwise reduce hardware errors. * Enable simple notifications for idle/sick/dead devices via --cmd-idle, --cmd-sick and --cmd-dead options that execute commands when the events occur. * Support for probing/hotplugging new devices from the TUI ([M]anage, plus key). * Simple multicast RPC detection. * Imported Aaron D. Gifford's fast SHA256 implementation (used for share validation on CPU). * Due to random font issues, Unicode is disabled by default. Use --unicode to enable it. -- Luke Dashjr Thu, 29 Aug 2013 23:54:22 -0000 bfgminer (3.1.4-0precise1) precise; urgency=low * Enable triggering "identify" (flash LED) function from Manage device TUI for devices that support it. * modminer & x6500: Enable changing clock frequency from Manage devices TUI interface. * Device ailments (SICK, ERR, etc) are now displayed in red/pink. * erupter: Add support for "identify" (flash LED) function. After some delay (up to 13 seconds), the LED will light for 3 seconds (hashing stops during this time). * bitforce: Display voltages in Manage device TUI. * Reorganised TUI display, adding network transfer rate, and totals summary line including count of total devices/processors hashing as well as hottest temperature across all sensors. * Improved timer functionality under the hood, taking advantage of clock_gettime. This should fix issues (eg, erupters reporting insane hashrates) related to system clock changes (such as NTP drift adjustment). * Removed blank/wasted lines above/below log window in TUI mode. * bitforce: Clear queues at startup to avoid spurious warnings. * Stratum: Add support for rolling ntime header. 
-- Luke Dashjr Fri, 02 Aug 2013 02:28:17 -0000 bfgminer (3.1.3-0precise1) precise; urgency=low * Fix 100% CPU usage hang with GBT/getwork pools. * Make staged work underrun detection less overly aggressive. * Generate baud rate list from OS on *nix (fixes Mac/BSD build). -- Luke Dashjr Thu, 11 Jul 2013 18:52:25 -0000 bfgminer (3.1.2-0precise1) precise; urgency=low * TUI: The "GPU management" interface has been replaced with a new generic "Manage devices" interface, allowing easy enable and disable of non-GPU devices. * Major CPU usage reduction for faster mining rigs (on my minirig host system, from 35% down to 13%!). * erupter: New icarus-based driver to handle autodetection of (branded) Block Erupter devices. * opencl: Add support for AMD Catalyst 13.2+ drivers. * The device statlines have been condensed by reducing the device-specific space down to a single temperature reading. More detailed information (such as GPU fan speeds) is still available via RPC and the new "Manage devices" interface. * RPC: New "devscan" command to probe for new devices on demand. The effect is the same as starting BFGMiner with -S noauto -S . * TUI: Display percentage invalid of found nonces with hw errors. * cpu & opencl: These legacy drivers now respect the --scan-serial auto/noauto directives, and the old -C (enable CPU) and -G (disable GPU) options are now deprecated. -- Luke Dashjr Mon, 08 Jul 2013 18:46:53 -0000 bfgminer (3.1.1-0precise1) precise; urgency=low * bitforce: Support for Little Single boards, and Jalapenos with 1.2 firmware. * avalon: Support new overclocking speeds (325, 350 and 375). * Fixed a bunch of bugs, including GPU mining and at least one stratum-related crash. -- Luke Dashjr Thu, 22 Jun 2013 17:41:21 -0000 bfgminer (3.1.0-1precise1) precise; urgency=low * Properly added in new submodules to Debian packaging. * Added missing fpgautils.h include. -- graeme Fri, 14 Jun 2013 09:52:55 -0700 bfgminer (3.1.0-0precise1) precise; urgency=low * BitForce: Support for new long-board job-per-chip protocol. * Reformatted TUI summaries, including percentage of shares rejected or stale. * Icarus-compatible devices: Autodetect work_division by default (eg, for Block Erupter USB ASICs). * Avalon: Driver is now compiled in by default, including in official binaries * Avalon: Will only configure with specific -S avalon:/dev/ttyUSBn option (note the driver name must be included), since there is no reliable probe for it. * Avalon: Full data for 4-module devices in RPC. * OpenCL: Support for reading temperature from free software radeon drivers via libsensors. * Fixes for writing configuration files when --device and/or --remove-disabled are specified. * Faster startup now when pools are slow, connecting to the first pool available! * Adding a pool now from the menu will return quickly while it leaves probing it to the background. * Scrypt: Fixed the failure to create high thread concurrencies. * Scrypt: Intensities above 13 will not lead to garbage being generated on GCN (7xxx) hardware now. * Scrypt: Decreased default GPU threads to 1. Intensities above 13 cannot handle more. * Scrypt: Changed the default choice of thread concurrency if no options are passed as a hint, based on amount of available GPU RAM. * Scrypt: Extensive updates to the SCRYPT-README file in line with changes to the code and new information. * Scrypt: Slightly larger buffer which may lead to less hardware errors on very fast GPUs. * CPU mining: Numerous improvements to portability. 
-- Luke Dashjr Thu, 13 Jun 2013 20:05:29 -0000 bfgminer (3.0.2-1precise1) precise; urgency=low * Added scrypt build option. * Fixed dependancies on raring. -- Luke Dashjr Sun, 28 Apr 2013 05:22:53 -0000 bfgminer (3.0.2-0precise1) precise; urgency=low * Fix stratum on Windows. -- Luke Dashjr Sun, 28 Apr 2013 05:22:53 -0000 bfgminer (3.0.1-0precise1) precise; urgency=low * Bitforce SC: Switched to using bulk queue mode for all ASIC-based devices. * Bitforce SC: Fix reinitialization-on-failure, so devices can autorecover from some problems. * OpenCL: Support for mining with completely free (non-proprietary) Mesa/LLVM OpenCL on Linux (requires git Mesa/LLVM). * OpenCL: Include platform in kernel binary filenames to avoid using stale binaries from other SDK installs. * Added network hashrate (based on block difficulty) to block status line. -- Luke Dashjr Wed, 24 Apr 2013 02:40:23 -0000 bfgminer (3.0.0-0precise1) precise; urgency=low * New device driver for ASIC devices, including both Butterfly Labs' BitForce SC and Avalon. * Enhanced device driver API, enabling devices to asynchronously handle multiple slave processors. Use --show-processors to view each individually (not yet supported for Avalon). * You can now use --request-diff to ask for a specific share target from pools that support BIP 23 GBT Basic Pool Extensions. * Support for submitting found blocks to a local Bitcoin GBT server (bitcoind or Bitcoin-Qt with -server flag): just append #allblocks to the end of your bitcoind's URI. * Stratum connection resuming support - if you lose an active stratum connection, BFGMiner will attempt to resubmit any lost shares when it reconnects. * Android target support. You will have to compile it yourself, but it should just work - no patching needed. * New Python RPC client example from Christian Berendt. -- Luke Dashjr Fri, 05 Apr 2013 17:12:20 -0000 bfgminer (2.10.5-0precise1) precise; urgency=low * Fix critical solo mining bug. -- Luke Dashjr Fri, 08 Feb 2013 06:43:27 -0000 bfgminer (2.10.4-0precise1) precise; urgency=low * Better big-endian compatibility. * Kano's RPC example is now included as the bfgminer-rpc program. * Improved bitstream for ModMiners, with gains of about 10 Mh/s on average. This was originally supposed to be part of BFGMiner 2.9.2, but somehow slipped through the cracks! You'll need to power cycle your board, or use the --force-dev-init option to upload the new bitstream. * Many bugfixes and some small improvements all over. -- Luke Dashjr Thu, 07 Feb 2013 04:49:38 -0000 bfgminer (2.10.3-0precise1) precise; urgency=low * Current block display shows end of the previous block hash and (when GBT or Stratum servers are active) the next block's (that you're looking for) height. * miner.php and RPC API updates from Kano, including multiuser support for miner.php * Finally fix long-standing scrypt difficulty and u-hashrate calculation bugs. -- Luke Dashjr Tue, 22 Jan 2013 00:20:02 -0000 bfgminer (2.10.2-0precise1) precise; urgency=low * There is now a [Z]ero stats option under the display menu which resets all the visible statistics. * The current pool and block target difficulties are now shown in the status lines. * Block solve detection is now supported with scrypt mining as well (BLOCK! written at end of share and solved blocks listed under pool stats). * Stratum support for scrypt mining. * Numerous subtle bugfixes. 
-- Luke Dashjr Thu, 27 Dec 2012 11:20:53 -0000 bfgminer (2.10.1-0precise1) precise; urgency=low * modminer: Enable frequency adjustments up to 230 MHz, and allow changing the frequency (non-sticky) via the new "pgaset" RPC command. * Numerous stratum fixes, including failover recovery. Also allow using stratum with scrypt. * Replaced the work-based efficiency with new bandwidth-based efficiency. -- Luke Dashjr Fri, 21 Dec 2012 07:29:23 -0000 bfgminer (2.10.0-0precise1) precise; urgency=low * Avoid fetching more GBT jobs when we already have a usable one, or are already in the process of getting a new one that will suffice. This drastically improves efficiency on GBT pools and reduces unnecessary bandwidth waste. * Share submissions are now processed asyncronously in parallel, in a single dedicated thread, instead of spawning a new thread for every submission. This makes BFGMiner scale better to bigger rigs with more shares, and enables it to react to network outages better. Additionally, a new "AS" (Active Submissions) number has been added to the status line to make it easy to see at a glance what's going on. * New --skip-security-checks option to allow miners to skip checks when it saves bandwidth (stratum). * Additional extranonce to count template requests for GBT solo mining, to ensure we never work on hashing the same data redundantly. * Con's new work scheduler: The old work scheduler would spawn threads that all tried to grab work as best as they could, and this would lead to much more work than necessary being grabbed from getwork pools, and potentially hitting the pool at precisely the same time from multiple threads making a getwork failure more likely. It was also very difficult to track how much work was really available at any one time since all the threads were off doing their own thing. Centralising the work creation means it is strictly tracked now and as soon as one work item is taken, the scheduler will generate or download another one. The advantage here is to maximise the efficiency of work we get from any getwork source, be it with or without rolltime. It is also much less likely to have dips in providing work, should lead to less getwork failures, and scale to higher hashrates even with the old getwork protocols. * When the primary pool is stratum, GBT, or getwork in failover mode, no backup connections will be maintained to backup pools. The total number of unused stratum connections now should be extremely small. * Ztex driver improvements courtesy of Denis and Peter. * Lots of work under the hood and other minor goodies. Check full changelog. -- Luke Dashjr Tue, 11 Dec 2012 05:28:02 -0000 bfgminer (2.9.5-0precise1) precise; urgency=low * Bug fixes only. -- Luke Dashjr Tue, 11 Dec 2012 03:11:49 -0000 bfgminer (2.9.4-0precise1) precise; urgency=low * Documented solo mining in README. * Lots of small bugfixes everywhere. -- Luke Dashjr Tue, 04 Dec 2012 03:13:43 -0000 bfgminer (2.9.3-0precise1) precise; urgency=low * Many stratum bugfixes, including support for fractional difficulties. * Fix GBT to work properly with very short expiration times. -- Luke Dashjr Fri, 16 Nov 2012 03:49:51 -0000 bfgminer (2.9.2-0precise1) precise; urgency=low * modminer & x6500: New bitstream, improving functional performance by about 10 Mh/s on average. You'll need to power cycle your device, or use the new --force-dev-init option to upload the new bitstream. * x6500: Support for temperature sensors, including --temp-target & co. * x6500: Extended per-FPGA details in RPC API. 
* x6500: Support for older X6500 devices by manually specifying their serial numbers as --scan-serial x6500:serialnumber. * Some basic stratum transparency checks to help keep pools accountable using new mining.get_transactions method. -- Luke Dashjr Wed, 07 Nov 2012 18:46:00 -0000 bfgminer (2.9.1-0precise1) precise; urgency=low * RPC: Add support for X6500 FPGAs. * Enforce --expiry option to make solo mining without longpoll move on eventually. * Always detect non-scrypt if Stratum is in use. * Various other bug fixes, including a number of memory leaks. -- Luke Dashjr Sun, 30 Oct 2012 16:16:22 -0000 bfgminer (2.9.0-0precise1) precise; urgency=low * X6500: New driver for mining on FPGA Mining LLC's X6500 coprocessor. * Support for solo mining against bitcoind 0.7's GBT interface using new --coinbase-addr option (powered by libblkmaker 0.2.0). * OpenCL: Rewritten dynamic intensity code should fix remaining issues with it. * BFGMiner now displays the share's best difficulty as well as the current pool difficulty. * Support for saving and restoring pool priorities in config file, as well as changing them all at once in the curses TUI (by "blinkier"). * OpenCL: Updated kernels which should allow ultra low memory speeds once again (idea courtesy of Vbs). * Stratum support by Con. * BFGMiner now tries to actually catch crashes and unless the --no-restart option is given, it will try to restart cleanly. * OpenCL: Scrypt will now not fail when setting high thread concurrency values that still return some RAM even if OpenCL returns an error on that RAM allocation. -- Luke Dashjr Sun, 28 Oct 2012 10:39:56 -0000 bfgminer (2.8.3-0precise1) precise; urgency=low * Various bugs fixed, no new features. -- Luke Dashjr Mon, 18 Oct 2012 12:56:56 -0000 bfgminer (2.8.2-0precise1) precise; urgency=low * Numerous fixups for Enterpoint's Cairsmore dynamic clocking; it has actually been tested this time. :) * Support for --temp-target and --temp-hysteresis controls on ModMiner FPGA devices. * Generic failure management for all devices, including automatically attempting to restart dead devices. * Improved portability to new platforms, partially including Cygwin. * Various minor error handling improvements and bugfixes. -- Luke Dashjr Mon, 08 Oct 2012 23:13:01 -0000 bfgminer (2.8.1-0precise1) precise; urgency=low * Many improvements for Enterpoint's Cairsmore, including (experimental) support for Glasswalker's dynamic frequency bitstream. * New --coinbase-sig option that lets you embed a short tidbit in any blocks you personally find (only on GBT-enabled pools). * Generic dynamic clocking framework based on the Ztex driver's (written by nelisky), now used by ModMiner and (Glasswalker) Cairnsmore. * New RPC "identify" command to flash LEDs on some FPGAs (currently BitForce and GW Cairnsmore). * Include share difficulty information in log and RPC. * Lots of other various bugfixes and small improvements. -- Luke Dashjr Thu, 27 Sep 2012 06:20:05 -0000 bfgminer (2.8.0-0precise1) precise; urgency=low * Basic getblocktemplate decentralized mining protocol support, including rolling extranonce (based on libblkmaker 0.1). * New Cairnsmore driver, including autodetection (based in part on Kano's Icarus "timing" feature). * Support for per-pool proxies, based in part on Kano's similar work (but cleaned up and not compatible). * Numerous minor fixups to Ztex driver. * Minor improvements to Icarus driver, including the ability to disable the reopen quirk and detection when the default hash speed is wrong. 
* Updated RPC API from Kano to include debug and setconfig methods. * Updated miner.php from Kano to hide IP addresses for security (configurable). -- Luke Dashjr Sat, 15 Sep 2012 20:57:28 -0000 bfgminer (2.7.5-0precise1) precise; urgency=low * ModMiner: Revamped the dynamic clocking algorithm to keep FPGAs at higher speeds with fewer hardware errors. * OpenCL: Restored old default for Cypress (Radeon 58xx) worksize, to fix performance regression introduced in 2.7.2. * Failover is now fixed, courtesy of Con. * FPGAs: "-S all" option to try probing all enumerated serial ports on systems without non-miner serial devices. * Reverted "restart on ADL failure" feature that never did anything useful (if I'm wrong, complain and I'll make a switch to add it back) -- Luke Dashjr Thu, 27 Aug 2012 17:12:09 -0000 bfgminer (2.7.4-0precise1) precise; urgency=low * More fixes. -- Luke Dashjr Thu, 23 Aug 2012 21:48:02 -0000 bfgminer (2.7.3-0precise1) precise; urgency=low * Bugfixes for networking, new kernels, and updated miner.php. -- Luke Dashjr Thu, 23 Aug 2012 08:05:32 -0000 bfgminer (2.7.1-0precise1) precise; urgency=low * The occasional strange behaviour where lots of work would end up on backup pools in failover mode should be fixed. * The occasional scenario where one pool dies and the others behave like they're slow to provide work should be fixed. * Very high hashrate (U > 100) machines should now be able to work unhindered even on pools that don't support rolltime. * The --retry-pause/-R option no longer does anything and is deprecated in preference for faster communications and simpler code. * -r was removed as a shortcut for --retries. * Hopefully the dynamic mode for GPUs is fixed on windows. * Hopefully the 7 day windows crash bug has been fixed -- Luke Dashjr Tue, 21 Aug 2012 16:43:09 -0000 bfgminer (2.7.0-0precise1) precise; urgency=low * The main change in this version is a complete rewrite of the getwork requesting mechanism. Con had been slowly hacking away at the old one for some time, but finally gave up in disgust and has rewritten it to some extent. Previously, mining threads would occasionally throw out a request for more work, some arbitrary test would be done on whether more work should be requested, and it handed off the message to another thread which spawned another thread and that then sent the request and so on. Worse yet it was hard to find the right place to reuse work and so it was never reused to its utmost potential. The rewrite involves scheduling a new request based on the rate the old work items get used up, and is much better at predicting when it needs to leak work to backup pools and less likely to throw a "pool is not providing work fast enough" message. Overall you should now see much more Local Work (LW), the efficiency will be higher on pools that support rolltime, less work will be discarded, any magnitude rig will be kept solidly busy - note this MAY mean your overclocks will become that much more stressed if you have set clocks very aggressively. Thanks to Con and the numerous people who tested this during its development phase. * New pool strategy: Balance. * With the change to queueing and more roll work being possible than ever before, the imbalance between pools that support rolltime and those that don't will now be extreme in load balance strategy. To offset that, and since the number of people using load balance has been increasing, the new strategy was added to try and give roughly the same number of shares to each pool. * Other minor bugfixes. 
-- Luke Dashjr Tue, 21 Aug 2012 01:50:33 -0000 bfgminer (2.6.5-0precise1) precise; urgency=low * New BitFORCE firmware flash utility. When compiled with BitFORCE support, run `bitforce-firmware-flash` to get usage. * Fixed hanging when using "Switch User" on Windows. This feature still kills ADL, though, so use --no-restart (and don't enable fan control or overclocking) if you plan to use it. * Fixed dynamic mode on Windows. By default, Windows timer resolution is only 15ms and we're trying to sample much smaller than that. This was leading to the time taken to do GPU work appearing as zero for many samples. Now, a Windows multimedia timing API will be used to request more precision. * gpu-memdiff should now take effect when you change gpu engine clock from the menu as well. * The ADL gpu-map feature should work now when you have more ADL devices than OpenCL (eg, ATI cards that don't support OpenCL). * More tweaks to the queueing mechanism to increase efficiency and keep minirigs fully work laden. * Failover-only can now be changed via the RPC API. * Updated miner.php from Kano -- Luke Dashjr Mon, 20 Aug 2012 23:59:19 -0000 bfgminer (2.6.4-0precise1) precise; urgency=low * More drastic improvements to ModMiner clock adjustment algorithm. Downclocking should be more conservative, and BFGMiner will even raise the clock speed when it's doing well. * ModMiners are now kept busier (less idle time) and nonces are checked against the previous work to catch races. This may result in a higher utility (effective hashrate). * Tested and fixed problems managing throttling BitFORCE Singles. An 880 MH/s Single that was throttling to 440 MH/s before (and throwing lots of garbled errors) is now working cleanly at 590 MH/s - a 34% improvement in hashrate! * A bunch of annoying crashes were fixed: * * With high-share devices when network connectivity is lost. * * GPU mining not functional when any OpenCL platform fails. * * Occasional deadlock/hang during exit. * New --debuglog option to include debug information in log file (see README) but not in main console. * The scrypt code was updated - now ALL results from a GPU are checked to see if they're valid and if not, instead of being submitted where they are guaranteed to be rejected, they register as hardware errors. This will make tuning your scrypt parameters easier as HW error count will rise if you've set the parameters too high. * Updated miner.php from Kano -- graeme Thu, 09 Aug 2012 10:39:00 -0700 bfgminer (2.6.3.2-0precise2) precise; urgency=low * Switched to a Debian native package so that building minor packaging versions is easier. * Dropped debhelper reqs to version 7, so we can build on Lucid. -- graeme Thu, 09 Aug 2012 10:39:00 -0700 bfgminer (2.6.3.2-0precise1) precise; urgency=low * Added modprobe config for bitforce devices. -- graeme Wed, 08 Aug 2012 11:56:28 -0700 bfgminer (2.6.3.1-0precise1) precise; urgency=medium Version 2.6.3.1 - August 8, 2012 * Added minor version increment so that Launchpad will accept new orig.tar.xz upload. :/ -- graeme Wed, 08 Aug 2012 11:11:28 -0700 bfgminer (2.6.3-1precise1) precise; urgency=medium Version 2.6.3 - August 6, 2012 * First release to attempt Debian packaging. * Complete rewrite of the queueing mechanism which gets new work from the pools. The code in that section had gotten out of hand and become a "state machine" that was difficult to model its behaviour and occasionally led to stalls during startup and what-not without work and perhaps queued too much work in general. 
Now it monitors carefully the amount of queued and available work on a pool-by-pool basis and only queues when needed, but tries hard to make sure it doesn't run empty. You will notice the TQ value now hovers around zero when all is well, and only jumps up transiently. Hopefully the efficiency is even higher now too. Detection of pool lag should be more accurate now, and fewer shares should leak to backup pools now as well. * Numerous bugfixes and workarounds for the ModMiner driver, bringing it to a functional level even on Windows. * Scrypt support enabled for Windows binaries. * Can set failover-only mode now from the menu during runtime. * One-shot schedules (exactly one of --sched-stop or --sched-start) now handle times beyond midnight properly. * ModMiner now shows bitstream upload in summary line rather than spamming the log (progress is still logged every 10% for timestamps). * The RPC API should be able to bind to the port immediately on Linux now instead of having to wait 60 seconds as it used to. * Proper big endian (routers) support, including fixing and enabling functionality that crashed before. * Minimal support for Enterpoint's Cairnsmore1 FPGA mining board. * Throttling episodes on bitforce devices will now perform reasonably (given the circumstances) and/or register as HW errors. * Support for all FPGA drivers enabled by default when compiling from source. -- graeme Tue, 07 Aug 2012 10:51:00 -0500 bfgminer-bfgminer-3.10.0/debian/compat000066400000000000000000000000021226556647300176130ustar00rootroot000000000000007 bfgminer-bfgminer-3.10.0/debian/control000066400000000000000000000012501226556647300200160ustar00rootroot00000000000000Source: bfgminer Priority: optional Section: misc Maintainer: Luke Dashjr Standards-Version: 3.10.0 Build-Depends: build-essential, debhelper, autoconf, automake, libtool, libssl-dev, yasm, pkg-config, libudev-dev, libcurl4-openssl-dev, wget, unzip, libjansson-dev, libncurses5-dev, libudev-dev, libusb-1.0-0-dev, git, quilt, uthash-dev, libsensors4-dev Package: bfgminer Architecture: any Depends: libjansson4, libudev0 | libudev1, libcurl3, libncurses5, libusb-1.0-0, libsensors4 Description: This is a multi-threaded multi-pool GPU, FPGA and CPU miner with ATI GPU monitoring, (over)clocking and fanspeed support for bitcoin and derivative coins. bfgminer-bfgminer-3.10.0/debian/copyright000066400000000000000000000036171226556647300203570ustar00rootroot00000000000000This package was debianized by Nick Bushor on Sat, 05 May 2012 12:08:35 -0500. It was downloaded from https://github.com/luke-jr/bfgminer Upstream Author: Luke Dashjr Distributed under the terms of the GNU General Public License. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA On Debian GNU/Linux systems, the complete text of the GNU General Public License can be found in the /usr/share/common-licenses/GPL file.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The names of the authors may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. bfgminer-bfgminer-3.10.0/debian/patches/000077500000000000000000000000001226556647300200445ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/debian/patches/bfgminer-bitforce.diff000066400000000000000000000001501226556647300242560ustar00rootroot00000000000000--- /dev/null +++ b/bfgminer-bitforce.conf @@ -0,0 +1 @@ +options ftdi_sio vendor=0x0403 product=0x6014 bfgminer-bfgminer-3.10.0/debian/patches/series000066400000000000000000000000271226556647300212600ustar00rootroot00000000000000bfgminer-bitforce.diff bfgminer-bfgminer-3.10.0/debian/rules000077500000000000000000000005201226556647300174720ustar00rootroot00000000000000#!/usr/bin/make -f %: dh $@ override_dh_auto_configure: NOSUBMODULES=1 ./autogen.sh dh_auto_configure -- --enable-ztex --enable-bitforce --enable-icarus --enable-cpumining --enable-scrypt --enable-opencl override_dh_auto_install: $(MAKE) DESTDIR=$(CURDIR)/debian/bfgminer install override_dh_shlibdeps: dh_shlibdeps -Xbfgminer bfgminer-bfgminer-3.10.0/debian/source/000077500000000000000000000000001226556647300177155ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/debian/source/format000066400000000000000000000000151226556647300211240ustar00rootroot000000000000003.0 (native) bfgminer-bfgminer-3.10.0/deviceapi.c000066400000000000000000000610201226556647300172670ustar00rootroot00000000000000/* * Copyright 2011-2013 Luke Dashjr * Copyright 2011-2012 Con Kolivas * Copyright 2012-2013 Andrew Smith * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #ifdef WIN32 #include #else #include #endif #include #include #include #include #include #include #include "compat.h" #include "deviceapi.h" #include "logging.h" #include "lowlevel.h" #ifdef NEED_BFG_LOWL_VCOM #include "lowl-vcom.h" #endif #include "miner.h" #include "util.h" struct driver_registration *_bfg_drvreg1; struct driver_registration *_bfg_drvreg2; void _bfg_register_driver(const struct device_drv *drv) { static struct driver_registration *initlist; struct driver_registration *ndr; if (!drv) { // Move initlist to hashtables LL_FOREACH(initlist, ndr) { drv = ndr->drv; if (drv->drv_init) drv->drv_init(); HASH_ADD_KEYPTR(hh , _bfg_drvreg1, drv->dname, strlen(drv->dname), ndr); HASH_ADD_KEYPTR(hh2, _bfg_drvreg2, drv->name , strlen(drv->name ), ndr); } initlist = NULL; return; } ndr = malloc(sizeof(*ndr)); *ndr = (struct driver_registration){ .drv = drv, }; LL_PREPEND(initlist, ndr); } static int sort_drv_by_dname(struct driver_registration * const a, struct driver_registration * const b) { return strcmp(a->drv->dname, b->drv->dname); }; static int sort_drv_by_priority(struct driver_registration * const a, struct driver_registration * const b) { return a->drv->probe_priority - b->drv->probe_priority; }; void bfg_devapi_init() { _bfg_register_driver(NULL); HASH_SRT(hh , _bfg_drvreg1, sort_drv_by_dname ); HASH_SRT(hh2, _bfg_drvreg2, sort_drv_by_priority); } bool hashes_done(struct thr_info *thr, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce) { struct cgpu_info *cgpu = thr->cgpu; const long cycle = opt_log_interval / 5 ? : 1; if (unlikely(hashes == -1)) { if (timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > 0) dev_error(cgpu, REASON_THREAD_ZERO_HASH); if (thr->scanhash_working && opt_restart) { applog(LOG_ERR, "%"PRIpreprv" failure, attempting to reinitialize", cgpu->proc_repr); thr->scanhash_working = false; cgpu->reinit_backoff = 5.2734375; hashes = 0; } else { applog(LOG_ERR, "%"PRIpreprv" failure, disabling!", cgpu->proc_repr); cgpu->deven = DEV_RECOVER_ERR; run_cmd(cmd_idle); return false; } } else thr->scanhash_working = true; thr->hashes_done += hashes; if (hashes > cgpu->max_hashes) cgpu->max_hashes = hashes; timeradd(&thr->tv_hashes_done, tvp_hashes, &thr->tv_hashes_done); // max_nonce management (optional) if (unlikely((long)thr->tv_hashes_done.tv_sec < cycle)) { int mult; if (likely(!max_nonce || *max_nonce == 0xffffffff)) return true; mult = 1000000 / ((thr->tv_hashes_done.tv_usec + 0x400) / 0x400) + 0x10; mult *= cycle; if (*max_nonce > (0xffffffff * 0x400) / mult) *max_nonce = 0xffffffff; else *max_nonce = (*max_nonce * mult) / 0x400; } else if (unlikely(thr->tv_hashes_done.tv_sec > cycle) && max_nonce) *max_nonce = *max_nonce * cycle / thr->tv_hashes_done.tv_sec; else if (unlikely(thr->tv_hashes_done.tv_usec > 100000) && max_nonce) *max_nonce = *max_nonce * 0x400 / (((cycle * 1000000) + thr->tv_hashes_done.tv_usec) / (cycle * 1000000 / 0x400)); hashmeter2(thr); return true; } bool hashes_done2(struct thr_info *thr, int64_t hashes, uint32_t *max_nonce) { struct timeval tv_now, tv_delta; timer_set_now(&tv_now); timersub(&tv_now, &thr->_tv_last_hashes_done_call, &tv_delta); thr->_tv_last_hashes_done_call = tv_now; return hashes_done(thr, hashes, &tv_delta, max_nonce); } /* A generic wait function for threads that poll that will wait a specified * time tdiff waiting on a work restart request. Returns zero if the condition * was met (work restart requested) or ETIMEDOUT if not. 
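 * The wait is implemented with select() on the thread's work_restart_notifier, so a restart notification wakes the thread immediately instead of sleeping out the full interval; if the notifier was never initialised, it falls back to a plain sleep of mstime.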
*/ int restart_wait(struct thr_info *thr, unsigned int mstime) { struct timeval tv_timer, tv_now, tv_timeout; fd_set rfds; SOCKETTYPE wrn = thr->work_restart_notifier[0]; int rv; if (unlikely(thr->work_restart_notifier[1] == INVSOCK)) { // This is a bug! applog(LOG_ERR, "%"PRIpreprv": restart_wait called without a work_restart_notifier", thr->cgpu->proc_repr); cgsleep_ms(mstime); return (thr->work_restart ? 0 : ETIMEDOUT); } timer_set_now(&tv_now); timer_set_delay(&tv_timer, &tv_now, mstime * 1000); while (true) { FD_ZERO(&rfds); FD_SET(wrn, &rfds); tv_timeout = tv_timer; rv = select(wrn + 1, &rfds, NULL, NULL, select_timeout(&tv_timeout, &tv_now)); if (rv == 0) return ETIMEDOUT; if (rv > 0) { if (thr->work_restart) return 0; notifier_read(thr->work_restart_notifier); } timer_set_now(&tv_now); } } static struct work *get_and_prepare_work(struct thr_info *thr) { struct cgpu_info *proc = thr->cgpu; struct device_drv *api = proc->drv; struct work *work; work = get_work(thr); if (!work) return NULL; if (api->prepare_work && !api->prepare_work(thr, work)) { free_work(work); applog(LOG_ERR, "%"PRIpreprv": Work prepare failed, disabling!", proc->proc_repr); proc->deven = DEV_RECOVER_ERR; run_cmd(cmd_idle); return NULL; } return work; } // Miner loop to manage a single processor (with possibly multiple threads per processor) void minerloop_scanhash(struct thr_info *mythr) { struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *api = cgpu->drv; struct timeval tv_start, tv_end; struct timeval tv_hashes, tv_worktime; uint32_t max_nonce = api->can_limit_work ? api->can_limit_work(mythr) : 0xffffffff; int64_t hashes; struct work *work; const bool primary = (!mythr->device_thread) || mythr->primary_thread; #ifdef HAVE_PTHREAD_CANCEL pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL); #endif if (cgpu->deven != DEV_ENABLED) mt_disable(mythr); while (likely(!cgpu->shutdown)) { mythr->work_restart = false; request_work(mythr); work = get_and_prepare_work(mythr); if (!work) break; timer_set_now(&work->tv_work_start); do { thread_reportin(mythr); /* Only allow the mining thread to be cancelled when * it is not in the driver code. */ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL); timer_set_now(&tv_start); hashes = api->scanhash(mythr, work, work->blk.nonce + max_nonce); timer_set_now(&tv_end); pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL); pthread_testcancel(); thread_reportin(mythr); timersub(&tv_end, &tv_start, &tv_hashes); if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? 
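/* Only drivers that implement can_limit_work get a max_nonce pointer here; hashes_done skips its nonce-range rescaling when the pointer is NULL. */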
&max_nonce : NULL)) goto disabled; if (unlikely(mythr->work_restart)) { /* Apart from device_thread 0, we stagger the * starting of every next thread to try and get * all devices busy before worrying about * getting work for their extra threads */ if (!primary) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = 250 * mythr->device_thread * 1000000; nanosleep(&rgtp, NULL); } break; } if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED)) disabled: mt_disable(mythr); timersub(&tv_end, &work->tv_work_start, &tv_worktime); } while (!abandon_work(work, &tv_worktime, cgpu->max_hashes)); free_work(work); } } bool do_job_prepare(struct thr_info *mythr, struct timeval *tvp_now) { struct cgpu_info *proc = mythr->cgpu; struct device_drv *api = proc->drv; struct timeval tv_worktime; mythr->tv_morework.tv_sec = -1; mythr->_job_transition_in_progress = true; if (mythr->work) timersub(tvp_now, &mythr->work->tv_work_start, &tv_worktime); if ((!mythr->work) || abandon_work(mythr->work, &tv_worktime, proc->max_hashes)) { mythr->work_restart = false; request_work(mythr); // FIXME: Allow get_work to return NULL to retry on notification if (mythr->next_work) free_work(mythr->next_work); mythr->next_work = get_and_prepare_work(mythr); if (!mythr->next_work) return false; mythr->starting_next_work = true; api->job_prepare(mythr, mythr->next_work, mythr->_max_nonce); } else { mythr->starting_next_work = false; api->job_prepare(mythr, mythr->work, mythr->_max_nonce); } job_prepare_complete(mythr); return true; } void job_prepare_complete(struct thr_info *mythr) { if (unlikely(mythr->busy_state == TBS_GETTING_RESULTS)) return; if (mythr->work) { if (true /* TODO: job is near complete */ || unlikely(mythr->work_restart)) do_get_results(mythr, true); else {} // TODO: Set a timer to call do_get_results when job is near complete } else // no job currently running do_job_start(mythr); } void do_get_results(struct thr_info *mythr, bool proceed_with_new_job) { struct cgpu_info *proc = mythr->cgpu; struct device_drv *api = proc->drv; struct work *work = mythr->work; mythr->_job_transition_in_progress = true; mythr->tv_results_jobstart = mythr->tv_jobstart; mythr->_proceed_with_new_job = proceed_with_new_job; if (api->job_get_results) api->job_get_results(mythr, work); else job_results_fetched(mythr); } void job_results_fetched(struct thr_info *mythr) { if (mythr->_proceed_with_new_job) do_job_start(mythr); else { if (likely(mythr->prev_work)) { struct timeval tv_now; timer_set_now(&tv_now); do_process_results(mythr, &tv_now, mythr->prev_work, true); } mt_disable_start(mythr); } } void do_job_start(struct thr_info *mythr) { struct cgpu_info *proc = mythr->cgpu; struct device_drv *api = proc->drv; thread_reportin(mythr); api->job_start(mythr); } void mt_job_transition(struct thr_info *mythr) { struct timeval tv_now; timer_set_now(&tv_now); if (mythr->starting_next_work) { mythr->next_work->tv_work_start = tv_now; if (mythr->prev_work) free_work(mythr->prev_work); mythr->prev_work = mythr->work; mythr->work = mythr->next_work; mythr->next_work = NULL; } mythr->tv_jobstart = tv_now; mythr->_job_transition_in_progress = false; } void job_start_complete(struct thr_info *mythr) { struct timeval tv_now; if (unlikely(!mythr->prev_work)) return; timer_set_now(&tv_now); do_process_results(mythr, &tv_now, mythr->prev_work, false); } void job_start_abort(struct thr_info *mythr, bool failure) { struct cgpu_info *proc = mythr->cgpu; if (failure) { proc->deven = DEV_RECOVER_ERR; run_cmd(cmd_idle); } mythr->work = NULL; 
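/* Forget the in-flight work and clear the transition flag below, so the main loop can schedule a fresh job or leave the processor idle if it was marked DEV_RECOVER_ERR above. */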
mythr->_job_transition_in_progress = false; } bool do_process_results(struct thr_info *mythr, struct timeval *tvp_now, struct work *work, bool stopping) { struct cgpu_info *proc = mythr->cgpu; struct device_drv *api = proc->drv; struct timeval tv_hashes; int64_t hashes = 0; if (api->job_process_results) hashes = api->job_process_results(mythr, work, stopping); thread_reportin(mythr); if (hashes) { timersub(tvp_now, &mythr->tv_results_jobstart, &tv_hashes); if (!hashes_done(mythr, hashes, &tv_hashes, api->can_limit_work ? &mythr->_max_nonce : NULL)) return false; } return true; } static void do_notifier_select(struct thr_info *thr, struct timeval *tvp_timeout) { struct cgpu_info *cgpu = thr->cgpu; struct timeval tv_now; int maxfd; fd_set rfds; timer_set_now(&tv_now); FD_ZERO(&rfds); FD_SET(thr->notifier[0], &rfds); maxfd = thr->notifier[0]; FD_SET(thr->work_restart_notifier[0], &rfds); set_maxfd(&maxfd, thr->work_restart_notifier[0]); if (thr->mutex_request[1] != INVSOCK) { FD_SET(thr->mutex_request[0], &rfds); set_maxfd(&maxfd, thr->mutex_request[0]); } if (select(maxfd + 1, &rfds, NULL, NULL, select_timeout(tvp_timeout, &tv_now)) < 0) return; if (thr->mutex_request[1] != INVSOCK && FD_ISSET(thr->mutex_request[0], &rfds)) { // FIXME: This can only handle one request at a time! pthread_mutex_t *mutexp = &cgpu->device_mutex; notifier_read(thr->mutex_request); mutex_lock(mutexp); pthread_cond_signal(&cgpu->device_cond); pthread_cond_wait(&cgpu->device_cond, mutexp); mutex_unlock(mutexp); } if (FD_ISSET(thr->notifier[0], &rfds)) { notifier_read(thr->notifier); } if (FD_ISSET(thr->work_restart_notifier[0], &rfds)) notifier_read(thr->work_restart_notifier); } void cgpu_setup_control_requests(struct cgpu_info * const cgpu) { mutex_init(&cgpu->device_mutex); notifier_init(cgpu->thr[0]->mutex_request); pthread_cond_init(&cgpu->device_cond, NULL); } void cgpu_request_control(struct cgpu_info * const cgpu) { struct thr_info * const thr = cgpu->thr[0]; if (pthread_equal(pthread_self(), thr->pth)) return; mutex_lock(&cgpu->device_mutex); notifier_wake(thr->mutex_request); pthread_cond_wait(&cgpu->device_cond, &cgpu->device_mutex); } void cgpu_release_control(struct cgpu_info * const cgpu) { struct thr_info * const thr = cgpu->thr[0]; if (pthread_equal(pthread_self(), thr->pth)) return; pthread_cond_signal(&cgpu->device_cond); mutex_unlock(&cgpu->device_mutex); } static void _minerloop_setup(struct thr_info *mythr) { struct cgpu_info * const cgpu = mythr->cgpu, *proc; if (mythr->work_restart_notifier[1] == -1) notifier_init(mythr->work_restart_notifier); for (proc = cgpu; proc; proc = proc->next_proc) { mythr = proc->thr[0]; timer_set_now(&mythr->tv_watchdog); proc->disable_watchdog = true; } } void minerloop_async(struct thr_info *mythr) { struct thr_info *thr = mythr; struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *api = cgpu->drv; struct timeval tv_now; struct timeval tv_timeout; struct cgpu_info *proc; bool is_running, should_be_running; _minerloop_setup(mythr); while (likely(!cgpu->shutdown)) { tv_timeout.tv_sec = -1; timer_set_now(&tv_now); for (proc = cgpu; proc; proc = proc->next_proc) { mythr = proc->thr[0]; // Nothing should happen while we're starting a job if (unlikely(mythr->busy_state == TBS_STARTING_JOB)) goto defer_events; is_running = mythr->work; should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause); if (should_be_running) { if (unlikely(!(is_running || mythr->_job_transition_in_progress))) { mt_disable_finish(mythr); goto djp; } if (unlikely(mythr->work_restart)) 
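/* A work restart jumps straight to preparing a job for the new work. */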
goto djp; } else // ! should_be_running { if (unlikely((is_running || !mythr->_mt_disable_called) && !mythr->_job_transition_in_progress)) { disabled: ; timer_unset(&mythr->tv_morework); if (is_running) { if (mythr->busy_state != TBS_GETTING_RESULTS) do_get_results(mythr, false); else // Avoid starting job when pending result fetch completes mythr->_proceed_with_new_job = false; } else // !mythr->_mt_disable_called mt_disable_start(mythr); } } if (timer_passed(&mythr->tv_morework, &tv_now)) { djp: ; if (!do_job_prepare(mythr, &tv_now)) goto disabled; } defer_events: if (timer_passed(&mythr->tv_poll, &tv_now)) api->poll(mythr); if (timer_passed(&mythr->tv_watchdog, &tv_now)) { timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000); bfg_watchdog(proc, &tv_now); } reduce_timeout_to(&tv_timeout, &mythr->tv_morework); reduce_timeout_to(&tv_timeout, &mythr->tv_poll); reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog); } do_notifier_select(thr, &tv_timeout); } } static void do_queue_flush(struct thr_info *mythr) { struct cgpu_info *proc = mythr->cgpu; struct device_drv *api = proc->drv; api->queue_flush(mythr); if (mythr->next_work) { free_work(mythr->next_work); mythr->next_work = NULL; } } void minerloop_queue(struct thr_info *thr) { struct thr_info *mythr; struct cgpu_info *cgpu = thr->cgpu; struct device_drv *api = cgpu->drv; struct timeval tv_now; struct timeval tv_timeout; struct cgpu_info *proc; bool should_be_running; struct work *work; _minerloop_setup(thr); while (likely(!cgpu->shutdown)) { tv_timeout.tv_sec = -1; timer_set_now(&tv_now); for (proc = cgpu; proc; proc = proc->next_proc) { mythr = proc->thr[0]; should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause); redo: if (should_be_running) { if (unlikely(mythr->_mt_disable_called)) mt_disable_finish(mythr); if (unlikely(mythr->work_restart)) { mythr->work_restart = false; do_queue_flush(mythr); } while (!mythr->queue_full) { if (mythr->next_work) { work = mythr->next_work; mythr->next_work = NULL; } else { request_work(mythr); // FIXME: Allow get_work to return NULL to retry on notification work = get_and_prepare_work(mythr); } if (!work) break; if (!api->queue_append(mythr, work)) mythr->next_work = work; } } else if (unlikely(!mythr->_mt_disable_called)) { do_queue_flush(mythr); mt_disable_start(mythr); } if (timer_passed(&mythr->tv_poll, &tv_now)) api->poll(mythr); if (timer_passed(&mythr->tv_watchdog, &tv_now)) { timer_set_delay(&mythr->tv_watchdog, &tv_now, WATCHDOG_INTERVAL * 1000000); bfg_watchdog(proc, &tv_now); } should_be_running = (proc->deven == DEV_ENABLED && !mythr->pause); if (should_be_running && !mythr->queue_full) goto redo; reduce_timeout_to(&tv_timeout, &mythr->tv_poll); reduce_timeout_to(&tv_timeout, &mythr->tv_watchdog); } do_notifier_select(thr, &tv_timeout); } } void *miner_thread(void *userdata) { struct thr_info *mythr = userdata; struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *drv = cgpu->drv; pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); char threadname[20]; snprintf(threadname, 20, "miner_%s", cgpu->proc_repr_ns); RenameThread(threadname); if (drv->thread_init && !drv->thread_init(mythr)) { dev_error(cgpu, REASON_THREAD_FAIL_INIT); for (struct cgpu_info *slave = cgpu->next_proc; slave && !slave->threads; slave = slave->next_proc) dev_error(slave, REASON_THREAD_FAIL_INIT); __thr_being_msg(LOG_ERR, mythr, "failure, exiting"); goto out; } if (drv_ready(cgpu)) cgpu_set_defaults(cgpu); thread_reportout(mythr); applog(LOG_DEBUG, "Popping ping in miner 
thread"); notifier_read(mythr->notifier); // Wait for a notification to start cgtime(&cgpu->cgminer_stats.start_tv); if (drv->minerloop) drv->minerloop(mythr); else minerloop_scanhash(mythr); __thr_being_msg(LOG_NOTICE, mythr, "shutting down"); out: ; struct cgpu_info *proc = cgpu; do { proc->deven = DEV_DISABLED; proc->status = LIFE_DEAD2; } while ( (proc = proc->next_proc) && !proc->threads); mythr->getwork = 0; mythr->has_pth = false; cgsleep_ms(1); if (drv->thread_shutdown) drv->thread_shutdown(mythr); notifier_destroy(mythr->notifier); return NULL; } static pthread_mutex_t _add_cgpu_mutex = PTHREAD_MUTEX_INITIALIZER; static bool _add_cgpu(struct cgpu_info *cgpu) { int lpcount; renumber_cgpu(cgpu); if (!cgpu->procs) cgpu->procs = 1; lpcount = cgpu->procs; cgpu->device = cgpu; cgpu->dev_repr = malloc(6); sprintf(cgpu->dev_repr, "%s%2u", cgpu->drv->name, cgpu->device_id % 100); cgpu->dev_repr_ns = malloc(6); sprintf(cgpu->dev_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id % 100); strcpy(cgpu->proc_repr, cgpu->dev_repr); sprintf(cgpu->proc_repr_ns, "%s%u", cgpu->drv->name, cgpu->device_id); #ifdef NEED_BFG_LOWL_VCOM maybe_strdup_if_null(&cgpu->dev_manufacturer, detectone_meta_info.manufacturer); maybe_strdup_if_null(&cgpu->dev_product, detectone_meta_info.product); maybe_strdup_if_null(&cgpu->dev_serial, detectone_meta_info.serial); #endif devices_new = realloc(devices_new, sizeof(struct cgpu_info *) * (total_devices_new + lpcount + 1)); devices_new[total_devices_new++] = cgpu; if (lpcount > 1) { int ns; int tpp = cgpu->threads / lpcount; struct cgpu_info **nlp_p, *slave; const bool manylp = (lpcount > 26); const char *as = (manylp ? "aa" : "a"); // Note, strcpy instead of assigning a byte to get the \0 too strcpy(&cgpu->proc_repr[5], as); ns = strlen(cgpu->proc_repr_ns); strcpy(&cgpu->proc_repr_ns[ns], as); nlp_p = &cgpu->next_proc; for (int i = 1; i < lpcount; ++i) { slave = malloc(sizeof(*slave)); *slave = *cgpu; slave->proc_id = i; if (manylp) { slave->proc_repr[5] += i / 26; slave->proc_repr[6] += i % 26; slave->proc_repr_ns[ns ] += i / 26; slave->proc_repr_ns[ns + 1] += i % 26; } else { slave->proc_repr[5] += i; slave->proc_repr_ns[ns] += i; } slave->threads = tpp; devices_new[total_devices_new++] = slave; *nlp_p = slave; nlp_p = &slave->next_proc; } *nlp_p = NULL; cgpu->proc_id = 0; cgpu->threads -= (tpp * (lpcount - 1)); } cgpu->last_device_valid_work = time(NULL); return true; } bool add_cgpu(struct cgpu_info *cgpu) { mutex_lock(&_add_cgpu_mutex); const bool rv = _add_cgpu(cgpu); mutex_unlock(&_add_cgpu_mutex); return rv; } void add_cgpu_live(void *p) { add_cgpu(p); } bool add_cgpu_slave(struct cgpu_info *cgpu, struct cgpu_info *prev_cgpu) { if (!prev_cgpu) return add_cgpu(cgpu); while (prev_cgpu->next_proc) prev_cgpu = prev_cgpu->next_proc; mutex_lock(&_add_cgpu_mutex); int old_total_devices = total_devices_new; if (!_add_cgpu(cgpu)) { mutex_unlock(&_add_cgpu_mutex); return false; } prev_cgpu->next_proc = devices_new[old_total_devices]; mutex_unlock(&_add_cgpu_mutex); return true; } #ifdef NEED_BFG_LOWL_VCOM bool _serial_detect_all(struct lowlevel_device_info * const info, void * const userp) { detectone_func_t detectone = userp; if (serial_claim(info->path, NULL)) applogr(false, LOG_DEBUG, "%s is already claimed... 
skipping probes", info->path); return detectone(info->path); } #endif int _serial_detect(struct device_drv *api, detectone_func_t detectone, autoscan_func_t autoscan, int flags) { struct string_elist *iter, *tmp; const char *dev, *colon; bool inhibitauto = flags & 4; char found = 0; bool forceauto = flags & 1; bool hasname; bool doall = false; size_t namel = strlen(api->name); size_t dnamel = strlen(api->dname); #ifdef NEED_BFG_LOWL_VCOM clear_detectone_meta_info(); #endif DL_FOREACH_SAFE(scan_devices, iter, tmp) { dev = iter->string; if ((colon = strchr(dev, ':')) && colon[1] != '\0') { size_t idlen = colon - dev; // allow either name:device or dname:device if ((idlen != namel || strncasecmp(dev, api->name, idlen)) && (idlen != dnamel || strncasecmp(dev, api->dname, idlen))) continue; dev = colon + 1; hasname = true; } else hasname = false; if (!strcmp(dev, "auto")) forceauto = true; else if (!strcmp(dev, "noauto")) inhibitauto = true; else if ((flags & 2) && !hasname) continue; else if (!detectone) {} // do nothing else if (!strcmp(dev, "all")) doall = true; #ifdef NEED_BFG_LOWL_VCOM else if (serial_claim(dev, NULL)) { applog(LOG_DEBUG, "%s is already claimed... skipping probes", dev); string_elist_del(&scan_devices, iter); } #endif else if (detectone(dev)) { string_elist_del(&scan_devices, iter); ++found; } } #ifdef NEED_BFG_LOWL_VCOM if (doall && detectone) found += lowlevel_detect_id(_serial_detect_all, detectone, &lowl_vcom, 0, 0); #endif if ((forceauto || !(inhibitauto || found)) && autoscan) found += autoscan(); return found; } static FILE *_open_bitstream(const char *path, const char *subdir, const char *sub2, const char *filename) { char fullpath[PATH_MAX]; strcpy(fullpath, path); strcat(fullpath, "/"); if (subdir) { strcat(fullpath, subdir); strcat(fullpath, "/"); } if (sub2) { strcat(fullpath, sub2); strcat(fullpath, "/"); } strcat(fullpath, filename); return fopen(fullpath, "rb"); } #define _open_bitstream(path, subdir, sub2) do { \ f = _open_bitstream(path, subdir, sub2, filename); \ if (f) \ return f; \ } while(0) #define _open_bitstream2(path, path3) do { \ _open_bitstream(path, NULL, path3); \ _open_bitstream(path, "../share/" PACKAGE, path3); \ _open_bitstream(path, "../" PACKAGE, path3); \ } while(0) #define _open_bitstream3(path) do { \ _open_bitstream2(path, dname); \ _open_bitstream2(path, "bitstreams"); \ _open_bitstream2(path, NULL); \ } while(0) FILE *open_bitstream(const char *dname, const char *filename) { FILE *f; _open_bitstream3(opt_kernel_path); _open_bitstream3(cgminer_path); _open_bitstream3("."); return NULL; } void close_device_fd(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; const int fd = proc->device_fd; if (fd == -1) return; if (close(fd)) applog(LOG_WARNING, "%"PRIpreprv": Error closing device fd", proc->proc_repr); else { proc->device_fd = -1; applog(LOG_DEBUG, "%"PRIpreprv": Closed device fd", proc->proc_repr); } } bfgminer-bfgminer-3.10.0/deviceapi.h000066400000000000000000000100551226556647300172760ustar00rootroot00000000000000#ifndef __DEVICEAPI_H__ #define __DEVICEAPI_H__ #include #include #include #include #include "miner.h" struct driver_registration; struct driver_registration { const struct device_drv *drv; UT_hash_handle hh; // hash & order by dname UT_hash_handle hh2; // hash by name, order by priority struct driver_registration *next; // DO NOT USE }; extern struct driver_registration *_bfg_drvreg1; extern struct driver_registration *_bfg_drvreg2; extern void bfg_devapi_init(); #define 
BFG_FIND_DRV_BY_DNAME(reg, name, namelen) \ HASH_FIND(hh , _bfg_drvreg1, name, namelen, reg) #define BFG_FIND_DRV_BY_NAME(reg, name, namelen) \ HASH_FIND(hh2, _bfg_drvreg2, name, namelen, reg) #define BFG_FOREACH_DRIVER_BY_DNAME(reg, tmp) \ HASH_ITER(hh , _bfg_drvreg1, reg, tmp) #define BFG_FOREACH_DRIVER_BY_PRIORITY(reg, tmp) \ HASH_ITER(hh2, _bfg_drvreg2, reg, tmp) extern void _bfg_register_driver(const struct device_drv *); #define BFG_REGISTER_DRIVER(drv) \ struct device_drv drv; \ __attribute__((constructor)) \ static void __bfg_register_drv_ ## drv() { \ _bfg_register_driver(&drv); \ } \ // END BFG_REGISTER_DRIVER extern bool bfg_need_detect_rescan; extern void request_work(struct thr_info *); extern struct work *get_work(struct thr_info *); extern bool hashes_done(struct thr_info *, int64_t hashes, struct timeval *tvp_hashes, uint32_t *max_nonce); extern bool hashes_done2(struct thr_info *, int64_t hashes, uint32_t *max_nonce); extern void mt_disable_start(struct thr_info *); extern void mt_disable_finish(struct thr_info *); extern void mt_disable(struct thr_info *); // blocks until reenabled extern int restart_wait(struct thr_info *, unsigned int ms); extern void minerloop_scanhash(struct thr_info *); extern bool do_job_prepare(struct thr_info *, struct timeval *tvp_now); extern void job_prepare_complete(struct thr_info *); extern void do_get_results(struct thr_info *, bool proceed_with_new_job); extern void job_results_fetched(struct thr_info *); extern void do_job_start(struct thr_info *); extern void mt_job_transition(struct thr_info *); extern void job_start_complete(struct thr_info *); extern void job_start_abort(struct thr_info *, bool failure); extern bool do_process_results(struct thr_info *, struct timeval *tvp_now, struct work *, bool stopping); extern void minerloop_async(struct thr_info *); extern void minerloop_queue(struct thr_info *); // Establishes a simple way for external threads to directly communicate with device extern void cgpu_setup_control_requests(struct cgpu_info *); extern void cgpu_request_control(struct cgpu_info *); extern void cgpu_release_control(struct cgpu_info *); extern void *miner_thread(void *); extern void add_cgpu_live(void*); extern bool add_cgpu_slave(struct cgpu_info *, struct cgpu_info *master); typedef bool(*detectone_func_t)(const char*); typedef int(*autoscan_func_t)(); enum generic_detect_flags { GDF_FORCE_AUTO = 1, GDF_REQUIRE_DNAME = 2, GDF_DEFAULT_NOAUTO = 4, }; extern int _serial_detect(struct device_drv *api, detectone_func_t, autoscan_func_t, int flags); #define serial_detect_fauto(api, detectone, autoscan) \ _serial_detect(api, detectone, autoscan, 1) #define serial_detect_auto(api, detectone, autoscan) \ _serial_detect(api, detectone, autoscan, 0) #define serial_detect_auto_byname(api, detectone, autoscan) \ _serial_detect(api, detectone, autoscan, 2) #define serial_detect(api, detectone) \ _serial_detect(api, detectone, NULL, 0) #define serial_detect_byname(api, detectone) \ _serial_detect(api, detectone, NULL, 2) #define noserial_detect(api, autoscan) \ _serial_detect(api, NULL , autoscan, 0) #define noserial_detect_manual(api, autoscan) \ _serial_detect(api, NULL , autoscan, 4) #define generic_detect(drv, detectone, autoscan, flags) _serial_detect(drv, detectone, autoscan, flags) extern FILE *open_bitstream(const char *dname, const char *filename); extern void close_device_fd(struct thr_info *); #endif bfgminer-bfgminer-3.10.0/diablo130302.cl000066400000000000000000001272671226556647300174350ustar00rootroot00000000000000/* 
* DiabloMiner - OpenCL miner for BitCoin * Copyright (C) 2012, 2013 Con Kolivas * Copyright (C) 2010, 2011, 2012 Patrick McFarland * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more detail). * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ #ifdef VECTORS4 typedef uint4 z; #elif defined(VECTORS2) typedef uint2 z; #else typedef uint z; #endif #ifdef BITALIGN #pragma OPENCL EXTENSION cl_amd_media_ops : enable #define Zrotr(a, b) amd_bitalign((z)a, (z)a, (z)(32 - b)) #else #define Zrotr(a, b) rotate((z)a, (z)b) #endif #ifdef BFI_INT #define ZCh(a, b, c) amd_bytealign(a, b, c) #define ZMa(a, b, c) amd_bytealign((c ^ a), (b), (a)) #else #define ZCh(a, b, c) bitselect((z)c, (z)b, (z)a) #define ZMa(a, b, c) bitselect((z)a, (z)b, (z)c ^ (z)a) #endif /* These constants are not the classic SHA256 constants but the order that * constants are used in this kernel. */ __constant uint K[] = { 0xd807aa98U, 0x12835b01U, 0x243185beU, 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU, 0x9bdc06a7U, 0xc19bf3f4U, 0x0fc19dc6U, 0x240ca1ccU, 0x80000000U, // 10 0x2de92c6fU, 0x4a7484aaU, 0x00000280U, 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U, 0xc6e00bf3U, // 20 0x00A00055U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU, 0x53380d13U, 0x650a7354U, 0x766a0abbU, // 30 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU, 0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, // 40 0x19a4c116U, 0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U, 0x748f82eeU, 0x78a5636fU, // 50 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, 0xbef9a3f7U, 0xc67178f2U, 0x98c7e2a2U, 0x90bb1e3cU, 0x510e527fU, 0x9b05688cU, // 60 0xfc08884dU, 0x3c6ef372U, 0x50c6645bU, 0x6a09e667U, 0xbb67ae85U, 0x3ac42e24U, 0xd21ea4fdU, 0x59f111f1U, 0x923f82a4U, 0xab1c5ed5U, // 70 0x5807aa98U, 0xc19bf274U, 0xe49b69c1U, 0x00a00000U, 0xefbe4786U, 0x00000100U, 0x11002000U, 0x00400022U, 0x136032EDU }; #define ZR25(n) ((Zrotr((n), 25) ^ Zrotr((n), 14) ^ ((n) >> 3U))) #define ZR15(n) ((Zrotr((n), 15) ^ Zrotr((n), 13) ^ ((n) >> 10U))) #define ZR26(n) ((Zrotr((n), 26) ^ Zrotr((n), 21) ^ Zrotr((n), 7))) #define ZR30(n) ((Zrotr((n), 30) ^ Zrotr((n), 19) ^ Zrotr((n), 10))) __kernel __attribute__((vec_type_hint(z))) __attribute__((reqd_work_group_size(WORKSIZE, 1, 1))) void search( #ifndef GOFFSET const z base, #endif const uint PreVal4_state0, const uint PreVal4_state0_k7, const uint PreVal4_T1, const uint W18, const uint W19, const uint W16, const uint W17, const uint W16_plus_K16, const uint W17_plus_K17, const uint W31, const uint W32, const uint d1, const uint b1, const uint c1, const uint h1, const uint f1, const uint g1, const uint c1_plus_k5, const uint b1_plus_k6, const uint state0, const uint state1, const uint state2, const uint state3, const uint state4, const uint state5, const uint state6, const uint state7, volatile __global uint * output) { z ZA[930]; #ifdef GOFFSET const z Znonce = (uint)(get_global_id(0)); #else const z Znonce = base + 
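/* no driver-side global offset support: the host passes a base nonce and each work-item adds its global id */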
(uint)(get_global_id(0)); #endif ZA[15] = Znonce + PreVal4_state0; ZA[16] = (ZCh(ZA[15], b1, c1) + d1) + ZR26(ZA[15]); ZA[26] = Znonce + PreVal4_T1; ZA[27] = ZMa(f1, g1, ZA[26]) + ZR30(ZA[26]); ZA[17] = ZA[16] + h1; ZA[19] = (ZCh(ZA[17], ZA[15], b1) + c1_plus_k5) + ZR26(ZA[17]); ZA[28] = ZA[27] + ZA[16]; ZA[548] = ZMa(ZA[26], f1, ZA[28]) + ZR30(ZA[28]); ZA[20] = ZA[19] + g1; ZA[22] = (ZCh(ZA[20], ZA[17], ZA[15]) + b1_plus_k6) + ZR26(ZA[20]); ZA[29] = ZA[548] + ZA[19]; ZA[549] = ZMa(ZA[28], ZA[26], ZA[29]) + ZR30(ZA[29]); ZA[23] = ZA[22] + f1; ZA[24] = ZCh(ZA[23], ZA[20], ZA[17]) + ZR26(ZA[23]); ZA[180] = Znonce + PreVal4_state0_k7; ZA[30] = ZA[549] + ZA[22]; ZA[31] = ZMa(ZA[29], ZA[28], ZA[30]) + ZR30(ZA[30]); ZA[181] = ZA[180] + ZA[24]; ZA[182] = ZA[181] + ZA[26]; ZA[183] = ZA[181] + ZA[31]; ZA[18] = ZA[17] + K[0]; ZA[186] = (ZCh(ZA[182], ZA[23], ZA[20]) + ZA[18]) + ZR26(ZA[182]); ZA[184] = ZMa(ZA[30], ZA[29], ZA[183]) + ZR30(ZA[183]); ZA[187] = ZA[186] + ZA[28]; ZA[188] = ZA[186] + ZA[184]; ZA[21] = ZA[20] + K[1]; ZA[191] = (ZCh(ZA[187], ZA[182], ZA[23]) + ZA[21]) + ZR26(ZA[187]); ZA[189] = ZMa(ZA[183], ZA[30], ZA[188]) + ZR30(ZA[188]); ZA[192] = ZA[191] + ZA[29]; ZA[193] = ZA[191] + ZA[189]; ZA[25] = ZA[23] + K[2]; ZA[196] = (ZCh(ZA[192], ZA[187], ZA[182]) + ZA[25]) + ZR26(ZA[192]); ZA[194] = ZMa(ZA[188], ZA[183], ZA[193]) + ZR30(ZA[193]); ZA[197] = ZA[196] + ZA[30]; ZA[198] = ZA[196] + ZA[194]; ZA[185] = ZA[182] + K[3]; ZA[201] = (ZCh(ZA[197], ZA[192], ZA[187]) + ZA[185]) + ZR26(ZA[197]); ZA[199] = ZMa(ZA[193], ZA[188], ZA[198]) + ZR30(ZA[198]); ZA[202] = ZA[201] + ZA[183]; ZA[203] = ZA[201] + ZA[199]; ZA[190] = ZA[187] + K[4]; ZA[206] = (ZCh(ZA[202], ZA[197], ZA[192]) + ZA[190]) + ZR26(ZA[202]); ZA[204] = ZMa(ZA[198], ZA[193], ZA[203]) + ZR30(ZA[203]); ZA[207] = ZA[206] + ZA[188]; ZA[208] = ZA[206] + ZA[204]; ZA[195] = ZA[192] + K[5]; ZA[211] = (ZCh(ZA[207], ZA[202], ZA[197]) + ZA[195]) + ZR26(ZA[207]); ZA[209] = ZMa(ZA[203], ZA[198], ZA[208]) + ZR30(ZA[208]); ZA[212] = ZA[193] + ZA[211]; ZA[213] = ZA[211] + ZA[209]; ZA[200] = ZA[197] + K[6]; ZA[216] = (ZCh(ZA[212], ZA[207], ZA[202]) + ZA[200]) + ZR26(ZA[212]); ZA[214] = ZMa(ZA[208], ZA[203], ZA[213]) + ZR30(ZA[213]); ZA[217] = ZA[198] + ZA[216]; ZA[218] = ZA[216] + ZA[214]; ZA[205] = ZA[202] + K[7]; ZA[220] = (ZCh(ZA[217], ZA[212], ZA[207]) + ZA[205]) + ZR26(ZA[217]); ZA[219] = ZMa(ZA[213], ZA[208], ZA[218]) + ZR30(ZA[218]); ZA[222] = ZA[203] + ZA[220]; ZA[223] = ZA[220] + ZA[219]; ZA[210] = ZA[207] + W16_plus_K16; ZA[226] = (ZCh(ZA[222], ZA[217], ZA[212]) + ZA[210]) + ZR26(ZA[222]); ZA[225] = ZMa(ZA[218], ZA[213], ZA[223]) + ZR30(ZA[223]); ZA[0] = ZR25(Znonce) + W18; ZA[228] = ZA[226] + ZA[225]; ZA[227] = ZA[208] + ZA[226]; ZA[215] = ZA[212] + W17_plus_K17; ZA[231] = (ZCh(ZA[227], ZA[222], ZA[217]) + ZA[215]) + ZR26(ZA[227]); ZA[229] = ZMa(ZA[223], ZA[218], ZA[228]) + ZR30(ZA[228]); ZA[1] = ZA[0] + K[8]; ZA[232] = ZA[213] + ZA[231]; ZA[233] = ZA[231] + ZA[229]; ZA[221] = ZA[217] + ZA[1]; ZA[32] = Znonce + W19; ZA[236] = (ZCh(ZA[232], ZA[227], ZA[222]) + ZA[221]) + ZR26(ZA[232]); ZA[234] = ZMa(ZA[228], ZA[223], ZA[233]) + ZR30(ZA[233]); ZA[33] = ZA[32] + K[9]; ZA[3] = ZR15(ZA[0]) + K[10]; ZA[238] = ZA[236] + ZA[234]; ZA[237] = ZA[218] + ZA[236]; ZA[224] = ZA[222] + ZA[33]; ZA[241] = (ZCh(ZA[237], ZA[232], ZA[227]) + ZA[224]) + ZR26(ZA[237]); ZA[239] = ZMa(ZA[233], ZA[228], ZA[238]) + ZR30(ZA[238]); ZA[4] = ZA[3] + K[11]; ZA[35] = ZR15(ZA[32]); ZA[243] = ZA[241] + ZA[239]; ZA[242] = ZA[223] + ZA[241]; ZA[230] = ZA[227] + ZA[4]; 
ZA[246] = (ZCh(ZA[242], ZA[237], ZA[232]) + ZA[230]) + ZR26(ZA[242]); ZA[244] = ZMa(ZA[238], ZA[233], ZA[243]) + ZR30(ZA[243]); ZA[36] = ZA[35] + K[12]; ZA[7] = ZR15(ZA[3]) + K[13]; ZA[248] = ZA[246] + ZA[244]; ZA[247] = ZA[228] + ZA[246]; ZA[235] = ZA[232] + ZA[36]; ZA[251] = (ZCh(ZA[247], ZA[242], ZA[237]) + ZA[235]) + ZR26(ZA[247]); ZA[249] = ZMa(ZA[243], ZA[238], ZA[248]) + ZR30(ZA[248]); ZA[8] = ZA[7] + K[14]; ZA[38] = ZR15(ZA[35]) + W16; ZA[253] = ZA[251] + ZA[249]; ZA[252] = ZA[233] + ZA[251]; ZA[240] = ZA[237] + ZA[8]; ZA[256] = (ZCh(ZA[252], ZA[247], ZA[242]) + ZA[240]) + ZR26(ZA[252]); ZA[254] = ZMa(ZA[248], ZA[243], ZA[253]) + ZR30(ZA[253]); ZA[40] = ZA[38] + K[15]; ZA[10] = ZR15(ZA[7]) + W17; ZA[258] = ZA[256] + ZA[254]; ZA[257] = ZA[238] + ZA[256]; ZA[245] = ZA[242] + ZA[40]; ZA[261] = (ZCh(ZA[257], ZA[252], ZA[247]) + ZA[245]) + ZR26(ZA[257]); ZA[259] = ZMa(ZA[253], ZA[248], ZA[258]) + ZR30(ZA[258]); ZA[13] = ZA[10] + K[16]; ZA[43] = ZR15(ZA[38]) + ZA[0]; ZA[263] = ZA[261] + ZA[259]; ZA[262] = ZA[243] + ZA[261]; ZA[250] = ZA[247] + ZA[13]; ZA[266] = (ZCh(ZA[262], ZA[257], ZA[252]) + ZA[250]) + ZR26(ZA[262]); ZA[264] = ZMa(ZA[258], ZA[253], ZA[263]) + ZR30(ZA[263]); ZA[11] = ZR15(ZA[10]); ZA[45] = ZA[43] + K[17]; ZA[52] = ZA[11] + ZA[32]; ZA[267] = ZA[248] + ZA[266]; ZA[255] = ZA[252] + ZA[45]; ZA[268] = ZA[266] + ZA[264]; ZA[271] = (ZCh(ZA[267], ZA[262], ZA[257]) + ZA[255]) + ZR26(ZA[267]); ZA[269] = ZMa(ZA[263], ZA[258], ZA[268]) + ZR30(ZA[268]); ZA[54] = ZA[52] + K[18]; ZA[48] = ZR15(ZA[43]) + ZA[3]; ZA[273] = ZA[271] + ZA[269]; ZA[272] = ZA[253] + ZA[271]; ZA[260] = ZA[257] + ZA[54]; ZA[276] = (ZCh(ZA[272], ZA[267], ZA[262]) + ZA[260]) + ZR26(ZA[272]); ZA[274] = ZMa(ZA[268], ZA[263], ZA[273]) + ZR30(ZA[273]); ZA[49] = ZA[48] + K[19]; ZA[61] = ZR15(ZA[52]) + ZA[35]; ZA[278] = ZA[276] + ZA[274]; ZA[277] = ZA[258] + ZA[276]; ZA[265] = ZA[262] + ZA[49]; ZA[281] = (ZCh(ZA[277], ZA[272], ZA[267]) + ZA[265]) + ZR26(ZA[277]); ZA[279] = ZMa(ZA[273], ZA[268], ZA[278]) + ZR30(ZA[278]); ZA[62] = ZA[61] + K[20]; ZA[53] = ZR15(ZA[48]) + ZA[7]; ZA[283] = ZA[281] + ZA[279]; ZA[282] = ZA[263] + ZA[281]; ZA[270] = ZA[267] + ZA[62]; ZA[286] = (ZCh(ZA[282], ZA[277], ZA[272]) + ZA[270]) + ZR26(ZA[282]); ZA[284] = ZMa(ZA[278], ZA[273], ZA[283]) + ZR30(ZA[283]); ZA[39] = ZA[38] + K[21]; ZA[55] = ZA[53] + K[22]; ZA[66] = ZR15(ZA[61]) + ZA[39]; ZA[288] = ZA[286] + ZA[284]; ZA[287] = ZA[268] + ZA[286]; ZA[275] = ZA[272] + ZA[55]; ZA[291] = (ZCh(ZA[287], ZA[282], ZA[277]) + ZA[275]) + ZR26(ZA[287]); ZA[289] = ZMa(ZA[283], ZA[278], ZA[288]) + ZR30(ZA[288]); ZA[12] = ZA[10] + W31; ZA[68] = ZA[66] + K[23]; ZA[67] = ZR15(ZA[53]) + ZA[12]; ZA[293] = ZA[291] + ZA[289]; ZA[292] = ZA[273] + ZA[291]; ZA[280] = ZA[277] + ZA[68]; ZA[296] = (ZCh(ZA[292], ZA[287], ZA[282]) + ZA[280]) + ZR26(ZA[292]); ZA[294] = ZMa(ZA[288], ZA[283], ZA[293]) + ZR30(ZA[293]); ZA[2] = ZR25(ZA[0]); ZA[69] = ZA[67] + K[24]; ZA[44] = ZA[43] + W32; ZA[75] = ZR15(ZA[66]) + ZA[44]; ZA[298] = ZA[296] + ZA[294]; ZA[297] = ZA[278] + ZA[296]; ZA[285] = ZA[282] + ZA[69]; ZA[5] = ZA[2] + W17; ZA[301] = (ZCh(ZA[297], ZA[292], ZA[287]) + ZA[285]) + ZR26(ZA[297]); ZA[299] = ZMa(ZA[293], ZA[288], ZA[298]) + ZR30(ZA[298]); ZA[56] = ZA[52] + ZA[5]; ZA[76] = ZA[75] + K[25]; ZA[34] = ZR25(ZA[32]) + ZA[0]; ZA[70] = ZR15(ZA[67]) + ZA[56]; ZA[302] = ZA[283] + ZA[301]; ZA[303] = ZA[301] + ZA[299]; ZA[290] = ZA[287] + ZA[76]; ZA[306] = (ZCh(ZA[302], ZA[297], ZA[292]) + ZA[290]) + ZR26(ZA[302]); ZA[304] = ZMa(ZA[298], ZA[293], ZA[303]) + ZR30(ZA[303]); 
ZA[6] = ZR25(ZA[3]); ZA[77] = ZA[70] + K[26]; ZA[50] = ZA[34] + ZA[48]; ZA[78] = ZR15(ZA[75]) + ZA[50]; ZA[308] = ZA[306] + ZA[304]; ZA[307] = ZA[288] + ZA[306]; ZA[295] = ZA[292] + ZA[77]; ZA[41] = ZA[32] + ZA[6]; ZA[311] = (ZCh(ZA[307], ZA[302], ZA[297]) + ZA[295]) + ZR26(ZA[307]); ZA[309] = ZMa(ZA[303], ZA[298], ZA[308]) + ZR30(ZA[308]); ZA[63] = ZA[41] + ZA[61]; ZA[85] = ZA[78] + K[27]; ZA[37] = ZR25(ZA[35]) + ZA[3]; ZA[79] = ZR15(ZA[70]) + ZA[63]; ZA[312] = ZA[293] + ZA[311]; ZA[313] = ZA[311] + ZA[309]; ZA[300] = ZA[297] + ZA[85]; ZA[316] = (ZCh(ZA[312], ZA[307], ZA[302]) + ZA[300]) + ZR26(ZA[312]); ZA[314] = ZMa(ZA[308], ZA[303], ZA[313]) + ZR30(ZA[313]); ZA[9] = ZR25(ZA[7]); ZA[86] = ZA[79] + K[28]; ZA[57] = ZA[37] + ZA[53]; ZA[87] = ZR15(ZA[78]) + ZA[57]; ZA[318] = ZA[316] + ZA[314]; ZA[317] = ZA[298] + ZA[316]; ZA[305] = ZA[302] + ZA[86]; ZA[46] = ZA[35] + ZA[9]; ZA[321] = (ZCh(ZA[317], ZA[312], ZA[307]) + ZA[305]) + ZR26(ZA[317]); ZA[319] = ZMa(ZA[313], ZA[308], ZA[318]) + ZR30(ZA[318]); ZA[71] = ZA[46] + ZA[66]; ZA[92] = ZA[87] + K[29]; ZA[42] = ZR25(ZA[38]) + ZA[7]; ZA[88] = ZR15(ZA[79]) + ZA[71]; ZA[322] = ZA[303] + ZA[321]; ZA[323] = ZA[321] + ZA[319]; ZA[310] = ZA[307] + ZA[92]; ZA[326] = (ZCh(ZA[322], ZA[317], ZA[312]) + ZA[310]) + ZR26(ZA[322]); ZA[324] = ZMa(ZA[318], ZA[313], ZA[323]) + ZR30(ZA[323]); ZA[14] = ZR25(ZA[10]); ZA[93] = ZA[88] + K[30]; ZA[72] = ZA[42] + ZA[67]; ZA[94] = ZR15(ZA[87]) + ZA[72]; ZA[328] = ZA[326] + ZA[324]; ZA[327] = ZA[308] + ZA[326]; ZA[315] = ZA[312] + ZA[93]; ZA[51] = ZA[38] + ZA[14]; ZA[331] = (ZCh(ZA[327], ZA[322], ZA[317]) + ZA[315]) + ZR26(ZA[327]); ZA[329] = ZMa(ZA[323], ZA[318], ZA[328]) + ZR30(ZA[328]); ZA[80] = ZA[51] + ZA[75]; ZA[100] = ZA[94] + K[31]; ZA[47] = ZR25(ZA[43]) + ZA[10]; ZA[95] = ZR15(ZA[88]) + ZA[80]; ZA[332] = ZA[313] + ZA[331]; ZA[333] = ZA[331] + ZA[329]; ZA[320] = ZA[317] + ZA[100]; ZA[336] = (ZCh(ZA[332], ZA[327], ZA[322]) + ZA[320]) + ZR26(ZA[332]); ZA[334] = ZMa(ZA[328], ZA[323], ZA[333]) + ZR30(ZA[333]); ZA[81] = ZA[47] + ZA[70]; ZA[101] = ZA[95] + K[32]; ZA[58] = ZR25(ZA[52]) + ZA[43]; ZA[102] = ZR15(ZA[94]) + ZA[81]; ZA[337] = ZA[318] + ZA[336]; ZA[338] = ZA[336] + ZA[334]; ZA[325] = ZA[322] + ZA[101]; ZA[341] = (ZCh(ZA[337], ZA[332], ZA[327]) + ZA[325]) + ZR26(ZA[337]); ZA[339] = ZMa(ZA[333], ZA[328], ZA[338]) + ZR30(ZA[338]); ZA[89] = ZA[58] + ZA[78]; ZA[108] = ZA[102] + K[33]; ZA[59] = ZR25(ZA[48]) + ZA[52]; ZA[103] = ZR15(ZA[95]) + ZA[89]; ZA[342] = ZA[323] + ZA[341]; ZA[343] = ZA[341] + ZA[339]; ZA[330] = ZA[327] + ZA[108]; ZA[346] = (ZCh(ZA[342], ZA[337], ZA[332]) + ZA[330]) + ZR26(ZA[342]); ZA[344] = ZMa(ZA[338], ZA[333], ZA[343]) + ZR30(ZA[343]); ZA[90] = ZA[59] + ZA[79]; ZA[109] = ZA[103] + K[34]; ZA[64] = ZR25(ZA[61]) + ZA[48]; ZA[110] = ZR15(ZA[102]) + ZA[90]; ZA[347] = ZA[328] + ZA[346]; ZA[348] = ZA[346] + ZA[344]; ZA[335] = ZA[332] + ZA[109]; ZA[351] = (ZCh(ZA[347], ZA[342], ZA[337]) + ZA[335]) + ZR26(ZA[347]); ZA[349] = ZMa(ZA[343], ZA[338], ZA[348]) + ZR30(ZA[348]); ZA[60] = ZR25(ZA[53]); ZA[116] = ZA[110] + K[35]; ZA[96] = ZA[87] + ZA[64]; ZA[111] = ZR15(ZA[103]) + ZA[96]; ZA[353] = ZA[351] + ZA[349]; ZA[352] = ZA[333] + ZA[351]; ZA[340] = ZA[337] + ZA[116]; ZA[65] = ZA[60] + ZA[61]; ZA[356] = (ZCh(ZA[352], ZA[347], ZA[342]) + ZA[340]) + ZR26(ZA[352]); ZA[354] = ZMa(ZA[348], ZA[343], ZA[353]) + ZR30(ZA[353]); ZA[97] = ZA[88] + ZA[65]; ZA[117] = ZA[111] + K[36]; ZA[73] = ZR25(ZA[66]) + ZA[53]; ZA[118] = ZR15(ZA[110]) + ZA[97]; ZA[357] = ZA[338] + ZA[356]; ZA[358] = ZA[356] + ZA[354]; ZA[345] 
= ZA[342] + ZA[117]; ZA[361] = (ZCh(ZA[357], ZA[352], ZA[347]) + ZA[345]) + ZR26(ZA[357]); ZA[359] = ZMa(ZA[353], ZA[348], ZA[358]) + ZR30(ZA[358]); ZA[104] = ZA[73] + ZA[94]; ZA[124] = ZA[118] + K[37]; ZA[74] = ZR25(ZA[67]) + ZA[66]; ZA[119] = ZR15(ZA[111]) + ZA[104]; ZA[362] = ZA[343] + ZA[361]; ZA[363] = ZA[361] + ZA[359]; ZA[350] = ZA[347] + ZA[124]; ZA[366] = (ZCh(ZA[362], ZA[357], ZA[352]) + ZA[350]) + ZR26(ZA[362]); ZA[364] = ZMa(ZA[358], ZA[353], ZA[363]) + ZR30(ZA[363]); ZA[105] = ZA[74] + ZA[95]; ZA[125] = ZA[119] + K[38]; ZA[82] = ZR25(ZA[75]) + ZA[67]; ZA[126] = ZR15(ZA[118]) + ZA[105]; ZA[367] = ZA[348] + ZA[366]; ZA[368] = ZA[366] + ZA[364]; ZA[355] = ZA[352] + ZA[125]; ZA[371] = (ZCh(ZA[367], ZA[362], ZA[357]) + ZA[355]) + ZR26(ZA[367]); ZA[369] = ZMa(ZA[363], ZA[358], ZA[368]) + ZR30(ZA[368]); ZA[112] = ZA[102] + ZA[82]; ZA[132] = ZA[126] + K[39]; ZA[83] = ZR25(ZA[70]) + ZA[75]; ZA[127] = ZR15(ZA[119]) + ZA[112]; ZA[372] = ZA[353] + ZA[371]; ZA[373] = ZA[371] + ZA[369]; ZA[360] = ZA[357] + ZA[132]; ZA[376] = (ZCh(ZA[372], ZA[367], ZA[362]) + ZA[360]) + ZR26(ZA[372]); ZA[374] = ZMa(ZA[368], ZA[363], ZA[373]) + ZR30(ZA[373]); ZA[113] = ZA[103] + ZA[83]; ZA[133] = ZA[127] + K[40]; ZA[84] = ZR25(ZA[78]) + ZA[70]; ZA[134] = ZR15(ZA[126]) + ZA[113]; ZA[377] = ZA[358] + ZA[376]; ZA[378] = ZA[376] + ZA[374]; ZA[365] = ZA[362] + ZA[133]; ZA[381] = (ZCh(ZA[377], ZA[372], ZA[367]) + ZA[365]) + ZR26(ZA[377]); ZA[379] = ZMa(ZA[373], ZA[368], ZA[378]) + ZR30(ZA[378]); ZA[120] = ZA[110] + ZA[84]; ZA[140] = ZA[134] + K[41]; ZA[91] = ZR25(ZA[79]) + ZA[78]; ZA[135] = ZR15(ZA[127]) + ZA[120]; ZA[382] = ZA[363] + ZA[381]; ZA[383] = ZA[381] + ZA[379]; ZA[370] = ZA[367] + ZA[140]; ZA[386] = (ZCh(ZA[382], ZA[377], ZA[372]) + ZA[370]) + ZR26(ZA[382]); ZA[384] = ZMa(ZA[378], ZA[373], ZA[383]) + ZR30(ZA[383]); ZA[121] = ZA[111] + ZA[91]; ZA[141] = ZA[135] + K[42]; ZA[98] = ZR25(ZA[87]) + ZA[79]; ZA[142] = ZR15(ZA[134]) + ZA[121]; ZA[387] = ZA[368] + ZA[386]; ZA[388] = ZA[386] + ZA[384]; ZA[375] = ZA[372] + ZA[141]; ZA[391] = (ZCh(ZA[387], ZA[382], ZA[377]) + ZA[375]) + ZR26(ZA[387]); ZA[389] = ZMa(ZA[383], ZA[378], ZA[388]) + ZR30(ZA[388]); ZA[128] = ZA[118] + ZA[98]; ZA[147] = ZA[142] + K[43]; ZA[99] = ZR25(ZA[88]) + ZA[87]; ZA[143] = ZR15(ZA[135]) + ZA[128]; ZA[392] = ZA[373] + ZA[391]; ZA[393] = ZA[391] + ZA[389]; ZA[380] = ZA[377] + ZA[147]; ZA[396] = (ZCh(ZA[392], ZA[387], ZA[382]) + ZA[380]) + ZR26(ZA[392]); ZA[394] = ZMa(ZA[388], ZA[383], ZA[393]) + ZR30(ZA[393]); ZA[129] = ZA[119] + ZA[99]; ZA[148] = ZA[143] + K[44]; ZA[106] = ZR25(ZA[94]) + ZA[88]; ZA[149] = ZR15(ZA[142]) + ZA[129]; ZA[397] = ZA[378] + ZA[396]; ZA[398] = ZA[396] + ZA[394]; ZA[385] = ZA[382] + ZA[148]; ZA[401] = (ZCh(ZA[397], ZA[392], ZA[387]) + ZA[385]) + ZR26(ZA[397]); ZA[399] = ZMa(ZA[393], ZA[388], ZA[398]) + ZR30(ZA[398]); ZA[136] = ZA[126] + ZA[106]; ZA[153] = ZA[149] + K[45]; ZA[107] = ZR25(ZA[95]) + ZA[94]; ZA[150] = ZR15(ZA[143]) + ZA[136]; ZA[402] = ZA[383] + ZA[401]; ZA[403] = ZA[401] + ZA[399]; ZA[390] = ZA[387] + ZA[153]; ZA[406] = (ZCh(ZA[402], ZA[397], ZA[392]) + ZA[390]) + ZR26(ZA[402]); ZA[404] = ZMa(ZA[398], ZA[393], ZA[403]) + ZR30(ZA[403]); ZA[137] = ZA[127] + ZA[107]; ZA[154] = ZA[150] + K[46]; ZA[114] = ZR25(ZA[102]) + ZA[95]; ZA[155] = ZR15(ZA[149]) + ZA[137]; ZA[407] = ZA[388] + ZA[406]; ZA[408] = ZA[406] + ZA[404]; ZA[395] = ZA[392] + ZA[154]; ZA[411] = (ZCh(ZA[407], ZA[402], ZA[397]) + ZA[395]) + ZR26(ZA[407]); ZA[409] = ZMa(ZA[403], ZA[398], ZA[408]) + ZR30(ZA[408]); ZA[144] = ZA[134] + ZA[114]; 
ZA[159] = ZA[155] + K[47]; ZA[115] = ZR25(ZA[103]) + ZA[102]; ZA[156] = ZR15(ZA[150]) + ZA[144]; ZA[412] = ZA[393] + ZA[411]; ZA[413] = ZA[411] + ZA[409]; ZA[400] = ZA[397] + ZA[159]; ZA[416] = (ZCh(ZA[412], ZA[407], ZA[402]) + ZA[400]) + ZR26(ZA[412]); ZA[414] = ZMa(ZA[408], ZA[403], ZA[413]) + ZR30(ZA[413]); ZA[145] = ZA[135] + ZA[115]; ZA[160] = ZA[156] + K[48]; ZA[122] = ZR25(ZA[110]) + ZA[103]; ZA[161] = ZR15(ZA[155]) + ZA[145]; ZA[417] = ZA[398] + ZA[416]; ZA[418] = ZA[416] + ZA[414]; ZA[405] = ZA[402] + ZA[160]; ZA[421] = (ZCh(ZA[417], ZA[412], ZA[407]) + ZA[405]) + ZR26(ZA[417]); ZA[419] = ZMa(ZA[413], ZA[408], ZA[418]) + ZR30(ZA[418]); ZA[151] = ZA[142] + ZA[122]; ZA[165] = ZA[161] + K[49]; ZA[123] = ZR25(ZA[111]) + ZA[110]; ZA[162] = ZR15(ZA[156]) + ZA[151]; ZA[422] = ZA[403] + ZA[421]; ZA[423] = ZA[421] + ZA[419]; ZA[410] = ZA[407] + ZA[165]; ZA[426] = (ZCh(ZA[422], ZA[417], ZA[412]) + ZA[410]) + ZR26(ZA[422]); ZA[424] = ZMa(ZA[418], ZA[413], ZA[423]) + ZR30(ZA[423]); ZA[152] = ZA[143] + ZA[123]; ZA[166] = ZA[162] + K[50]; ZA[130] = ZR25(ZA[118]) + ZA[111]; ZA[167] = ZR15(ZA[161]) + ZA[152]; ZA[427] = ZA[408] + ZA[426]; ZA[428] = ZA[426] + ZA[424]; ZA[415] = ZA[412] + ZA[166]; ZA[431] = (ZCh(ZA[427], ZA[422], ZA[417]) + ZA[415]) + ZR26(ZA[427]); ZA[429] = ZMa(ZA[423], ZA[418], ZA[428]) + ZR30(ZA[428]); ZA[157] = ZA[149] + ZA[130]; ZA[170] = ZA[167] + K[51]; ZA[131] = ZR25(ZA[119]) + ZA[118]; ZA[168] = ZR15(ZA[162]) + ZA[157]; ZA[432] = ZA[413] + ZA[431]; ZA[433] = ZA[431] + ZA[429]; ZA[420] = ZA[417] + ZA[170]; ZA[436] = (ZCh(ZA[432], ZA[427], ZA[422]) + ZA[420]) + ZR26(ZA[432]); ZA[434] = ZMa(ZA[428], ZA[423], ZA[433]) + ZR30(ZA[433]); ZA[158] = ZA[150] + ZA[131]; ZA[171] = ZA[168] + K[52]; ZA[138] = ZR25(ZA[126]) + ZA[119]; ZA[172] = ZR15(ZA[167]) + ZA[158]; ZA[437] = ZA[418] + ZA[436]; ZA[438] = ZA[436] + ZA[434]; ZA[425] = ZA[422] + ZA[171]; ZA[441] = (ZCh(ZA[437], ZA[432], ZA[427]) + ZA[425]) + ZR26(ZA[437]); ZA[439] = ZMa(ZA[433], ZA[428], ZA[438]) + ZR30(ZA[438]); ZA[163] = ZA[155] + ZA[138]; ZA[174] = ZA[172] + K[53]; ZA[139] = ZR25(ZA[127]) + ZA[126]; ZA[173] = ZR15(ZA[168]) + ZA[163]; ZA[442] = ZA[423] + ZA[441]; ZA[443] = ZA[441] + ZA[439]; ZA[430] = ZA[427] + ZA[174]; ZA[445] = (ZCh(ZA[442], ZA[437], ZA[432]) + ZA[430]) + ZR26(ZA[442]); ZA[444] = ZMa(ZA[438], ZA[433], ZA[443]) + ZR30(ZA[443]); ZA[164] = ZA[156] + ZA[139]; ZA[175] = ZA[173] + K[54]; ZA[146] = ZR25(ZA[134]) + ZA[127]; ZA[176] = ZR15(ZA[172]) + ZA[164]; ZA[446] = ZA[428] + ZA[445]; ZA[447] = ZA[445] + ZA[444]; ZA[435] = ZA[432] + ZA[175]; ZA[449] = (ZCh(ZA[446], ZA[442], ZA[437]) + ZA[435]) + ZR26(ZA[446]); ZA[448] = ZMa(ZA[443], ZA[438], ZA[447]) + ZR30(ZA[447]); ZA[169] = ZA[161] + ZA[146]; ZA[178] = ZA[176] + K[55]; ZA[177] = ZR15(ZA[173]) + ZA[169]; ZA[451] = ZA[449] + ZA[448]; ZA[450] = ZA[433] + ZA[449]; ZA[440] = ZA[437] + ZA[178]; ZA[453] = (ZCh(ZA[450], ZA[446], ZA[442]) + ZA[440]) + ZR26(ZA[450]); ZA[452] = ZMa(ZA[447], ZA[443], ZA[451]) + ZR30(ZA[451]); ZA[179] = ZA[177] + K[56]; ZA[454] = ZA[438] + ZA[453]; ZA[494] = ZA[442] + ZA[179]; ZA[455] = ZA[453] + ZA[452]; ZA[457] = (ZCh(ZA[454], ZA[450], ZA[446]) + ZA[494]) + ZR26(ZA[454]); ZA[456] = ZMa(ZA[451], ZA[447], ZA[455]) + ZR30(ZA[455]); ZA[459] = ZA[457] + ZA[456]; ZA[461] = ZA[455] + state1; ZA[460] = ZA[459] + state0; ZA[495] = ZA[460] + K[57]; ZA[469] = ZA[461] + K[58]; ZA[498] = (ZCh(ZA[495], K[59], K[60]) + ZA[469]) + ZR26(ZA[495]); ZA[462] = ZA[451] + state2; ZA[496] = ZA[460] + K[61]; ZA[506] = ZA[498] + K[62]; ZA[470] = ZA[462] + 
K[63]; ZA[507] = (ZCh(ZA[506], ZA[495], K[59]) + ZA[470]) + ZR26(ZA[506]); ZA[500] = ZMa(K[64], K[65], ZA[496]) + ZR30(ZA[496]); ZA[463] = ZA[447] + state3; ZA[458] = ZA[443] + ZA[457]; ZA[499] = ZA[498] + ZA[500]; ZA[508] = ZA[507] + K[65]; ZA[473] = ZA[463] + K[66]; ZA[510] = (ZCh(ZA[508], ZA[506], ZA[495]) + ZA[473]) + ZR26(ZA[508]); ZA[928] = ZMa(ZA[496], K[64], ZA[499]) + ZR30(ZA[499]); ZA[464] = ZA[458] + state4; ZA[476] = ZA[464] + ZA[460] + K[67]; ZA[511] = ZA[510] + K[64]; ZA[509] = ZA[928] + ZA[507]; ZA[465] = ZA[454] + state5; ZA[514] = (ZCh(ZA[511], ZA[508], ZA[506]) + ZA[476]) + ZR26(ZA[511]); ZA[512] = ZMa(ZA[499], ZA[496], ZA[509]) + ZR30(ZA[509]); ZA[478] = ZA[465] + K[68]; ZA[519] = ZA[506] + ZA[478]; ZA[516] = ZA[496] + ZA[514]; ZA[513] = ZA[510] + ZA[512]; ZA[466] = ZA[450] + state6; ZA[520] = (ZCh(ZA[516], ZA[511], ZA[508]) + ZA[519]) + ZR26(ZA[516]); ZA[515] = ZMa(ZA[509], ZA[499], ZA[513]) + ZR30(ZA[513]); ZA[480] = ZA[466] + K[69]; ZA[524] = ZA[508] + ZA[480]; ZA[521] = ZA[499] + ZA[520]; ZA[517] = ZA[514] + ZA[515]; ZA[467] = ZA[446] + state7; ZA[525] = (ZCh(ZA[521], ZA[516], ZA[511]) + ZA[524]) + ZR26(ZA[521]); ZA[522] = ZMa(ZA[513], ZA[509], ZA[517]) + ZR30(ZA[517]); ZA[484] = ZA[467] + K[70]; ZA[529] = ZA[511] + ZA[484]; ZA[526] = ZA[509] + ZA[525]; ZA[523] = ZA[520] + ZA[522]; ZA[530] = (ZCh(ZA[526], ZA[521], ZA[516]) + ZA[529]) + ZR26(ZA[526]); ZA[550] = ZMa(ZA[517], ZA[513], ZA[523]) + ZR30(ZA[523]); ZA[531] = ZA[513] + ZA[530]; ZA[533] = ZA[516] + K[71]; ZA[527] = ZA[550] + ZA[525]; ZA[534] = (ZCh(ZA[531], ZA[526], ZA[521]) + ZA[533]) + ZR26(ZA[531]); ZA[551] = ZMa(ZA[523], ZA[517], ZA[527]) + ZR30(ZA[527]); ZA[535] = ZA[517] + ZA[534]; ZA[538] = ZA[521] + K[1]; ZA[532] = ZA[551] + ZA[530]; ZA[539] = (ZCh(ZA[535], ZA[531], ZA[526]) + ZA[538]) + ZR26(ZA[535]); ZA[552] = ZMa(ZA[527], ZA[523], ZA[532]) + ZR30(ZA[532]); ZA[540] = ZA[523] + ZA[539]; ZA[542] = ZA[526] + K[2]; ZA[536] = ZA[552] + ZA[534]; ZA[543] = (ZCh(ZA[540], ZA[535], ZA[531]) + ZA[542]) + ZR26(ZA[540]); ZA[553] = ZMa(ZA[532], ZA[527], ZA[536]) + ZR30(ZA[536]); ZA[544] = ZA[527] + ZA[543]; ZA[555] = ZA[531] + K[3]; ZA[541] = ZA[553] + ZA[539]; ZA[558] = (ZCh(ZA[544], ZA[540], ZA[535]) + ZA[555]) + ZR26(ZA[544]); ZA[547] = ZMa(ZA[536], ZA[532], ZA[541]) + ZR30(ZA[541]); ZA[559] = ZA[532] + ZA[558]; ZA[556] = ZA[535] + K[4]; ZA[545] = ZA[547] + ZA[543]; ZA[562] = (ZCh(ZA[559], ZA[544], ZA[540]) + ZA[556]) + ZR26(ZA[559]); ZA[561] = ZMa(ZA[541], ZA[536], ZA[545]) + ZR30(ZA[545]); ZA[563] = ZA[536] + ZA[562]; ZA[560] = ZA[561] + ZA[558]; ZA[557] = ZA[540] + K[5]; ZA[568] = (ZCh(ZA[563], ZA[559], ZA[544]) + ZA[557]) + ZR26(ZA[563]); ZA[564] = ZMa(ZA[545], ZA[541], ZA[560]) + ZR30(ZA[560]); ZA[569] = ZA[541] + ZA[568]; ZA[572] = ZA[544] + K[6]; ZA[565] = ZA[562] + ZA[564]; ZA[574] = (ZCh(ZA[569], ZA[563], ZA[559]) + ZA[572]) + ZR26(ZA[569]); ZA[570] = ZMa(ZA[560], ZA[545], ZA[565]) + ZR30(ZA[565]); ZA[468] = ZR25(ZA[461]); ZA[497] = ZA[468] + ZA[460]; ZA[575] = ZA[545] + ZA[574]; ZA[571] = ZA[568] + ZA[570]; ZA[573] = ZA[559] + K[72]; ZA[578] = (ZCh(ZA[575], ZA[569], ZA[563]) + ZA[573]) + ZR26(ZA[575]); ZA[576] = ZMa(ZA[565], ZA[560], ZA[571]) + ZR30(ZA[571]); ZA[929] = ZR25(ZA[462]); ZA[503] = ZA[497] + 0xe49b69c1U; ZA[471] = ZA[929] + ZA[461] + K[74]; ZA[582] = ZA[563] + ZA[503]; ZA[579] = ZA[560] + ZA[578]; ZA[577] = ZA[574] + ZA[576]; ZA[583] = (ZCh(ZA[579], ZA[575], ZA[569]) + ZA[582]) + ZR26(ZA[579]); ZA[580] = ZMa(ZA[571], ZA[565], ZA[577]) + ZR30(ZA[577]); ZA[488] = ZA[471] + K[75]; ZA[472] = 
ZR25(ZA[463]) + ZA[462]; ZA[587] = ZA[569] + ZA[488]; ZA[584] = ZA[565] + ZA[583]; ZA[581] = ZA[578] + ZA[580]; ZA[588] = (ZCh(ZA[584], ZA[579], ZA[575]) + ZA[587]) + ZR26(ZA[584]); ZA[586] = ZMa(ZA[577], ZA[571], ZA[581]) + ZR30(ZA[581]); ZA[501] = ZR15(ZA[497]) + ZA[472]; ZA[475] = ZR15(ZA[471]); ZA[926] = ZA[575] + K[8]; ZA[474] = ZA[475] + ZA[463] + ZR25(ZA[464]); ZA[927] = ZA[926] + ZA[501]; ZA[589] = ZA[571] + ZA[588]; ZA[585] = ZA[583] + ZA[586]; ZA[592] = (ZCh(ZA[589], ZA[584], ZA[579]) + ZA[927]) + ZR26(ZA[589]); ZA[590] = ZMa(ZA[581], ZA[577], ZA[585]) + ZR30(ZA[585]); ZA[477] = ZR25(ZA[465]) + ZA[464]; ZA[489] = ZA[474] + K[9]; ZA[518] = ZR15(ZA[501]) + ZA[477]; ZA[479] = ZR25(ZA[466]); ZA[596] = ZA[579] + ZA[489]; ZA[593] = ZA[577] + ZA[592]; ZA[591] = ZA[588] + ZA[590]; ZA[597] = (ZCh(ZA[593], ZA[589], ZA[584]) + ZA[596]) + ZR26(ZA[593]); ZA[594] = ZMa(ZA[585], ZA[581], ZA[591]) + ZR30(ZA[591]); ZA[481] = ZA[479] + ZA[465]; ZA[601] = ZA[518] + K[11]; ZA[482] = ZR15(ZA[474]) + ZA[481]; ZA[602] = ZA[584] + ZA[601]; ZA[598] = ZA[581] + ZA[597]; ZA[595] = ZA[592] + ZA[594]; ZA[632] = (ZCh(ZA[598], ZA[593], ZA[589]) + ZA[602]) + ZR26(ZA[598]); ZA[599] = ZMa(ZA[591], ZA[585], ZA[595]) + ZR30(ZA[595]); ZA[483] = ZA[466] + K[76] + ZR25(ZA[467]); ZA[490] = ZA[482] + K[12]; ZA[528] = ZR15(ZA[518]) + ZA[483]; ZA[736] = ZA[585] + ZA[632]; ZA[605] = ZA[589] + ZA[490]; ZA[600] = ZA[597] + ZA[599]; ZA[485] = ZA[467] + K[77]; ZA[738] = (ZCh(ZA[736], ZA[598], ZA[593]) + ZA[605]) + ZR26(ZA[736]); ZA[744] = ZMa(ZA[595], ZA[591], ZA[600]) + ZR30(ZA[600]); ZA[487] = ZR15(ZA[482]) + ZA[485]; ZA[603] = ZA[528] + K[14]; ZA[502] = ZA[497] + ZA[487]; ZA[739] = ZA[591] + ZA[738]; ZA[604] = ZA[593] + ZA[603]; ZA[737] = ZA[744] + ZA[632]; ZA[741] = (ZCh(ZA[739], ZA[736], ZA[598]) + ZA[604]) + ZR26(ZA[739]); ZA[745] = ZMa(ZA[600], ZA[595], ZA[737]) + ZR30(ZA[737]); ZA[486] = ZA[471] + K[10]; ZA[606] = ZA[502] + K[15]; ZA[537] = ZR15(ZA[528]) + ZA[486]; ZA[742] = ZA[595] + ZA[741]; ZA[613] = ZA[598] + ZA[606]; ZA[740] = ZA[745] + ZA[738]; ZA[747] = (ZCh(ZA[742], ZA[739], ZA[736]) + ZA[613]) + ZR26(ZA[742]); ZA[746] = ZMa(ZA[737], ZA[600], ZA[740]) + ZR30(ZA[740]); ZA[607] = ZA[537] + K[16]; ZA[546] = ZR15(ZA[502]) + ZA[501]; ZA[751] = ZA[736] + ZA[607]; ZA[748] = ZA[600] + ZA[747]; ZA[743] = ZA[746] + ZA[741]; ZA[752] = (ZCh(ZA[748], ZA[742], ZA[739]) + ZA[751]) + ZR26(ZA[748]); ZA[749] = ZMa(ZA[740], ZA[737], ZA[743]) + ZR30(ZA[743]); ZA[608] = ZA[546] + K[17]; ZA[554] = ZR15(ZA[537]) + ZA[474]; ZA[756] = ZA[739] + ZA[608]; ZA[753] = ZA[737] + ZA[752]; ZA[750] = ZA[747] + ZA[749]; ZA[757] = (ZCh(ZA[753], ZA[748], ZA[742]) + ZA[756]) + ZR26(ZA[753]); ZA[754] = ZMa(ZA[743], ZA[740], ZA[750]) + ZR30(ZA[750]); ZA[609] = ZA[554] + K[18]; ZA[566] = ZR15(ZA[546]) + ZA[518]; ZA[761] = ZA[742] + ZA[609]; ZA[758] = ZA[740] + ZA[757]; ZA[755] = ZA[752] + ZA[754]; ZA[762] = (ZCh(ZA[758], ZA[753], ZA[748]) + ZA[761]) + ZR26(ZA[758]); ZA[759] = ZMa(ZA[750], ZA[743], ZA[755]) + ZR30(ZA[755]); ZA[610] = ZA[566] + K[19]; ZA[567] = ZR15(ZA[554]) + ZA[482]; ZA[766] = ZA[748] + ZA[610]; ZA[763] = ZA[743] + ZA[762]; ZA[760] = ZA[757] + ZA[759]; ZA[767] = (ZCh(ZA[763], ZA[758], ZA[753]) + ZA[766]) + ZR26(ZA[763]); ZA[764] = ZMa(ZA[755], ZA[750], ZA[760]) + ZR30(ZA[760]); ZA[611] = ZA[567] + K[20]; ZA[614] = ZR15(ZA[566]) + ZA[528]; ZA[771] = ZA[753] + ZA[611]; ZA[768] = ZA[750] + ZA[767]; ZA[765] = ZA[762] + ZA[764]; ZA[772] = (ZCh(ZA[768], ZA[763], ZA[758]) + ZA[771]) + ZR26(ZA[768]); ZA[769] = ZMa(ZA[760], ZA[755], ZA[765]) + 
ZR30(ZA[765]); ZA[612] = ZA[502] + K[78]; ZA[615] = ZA[614] + K[22]; ZA[616] = ZR15(ZA[567]) + ZA[612]; ZA[504] = ZR25(ZA[497]) + K[76]; ZA[776] = ZA[758] + ZA[615]; ZA[773] = ZA[755] + ZA[772]; ZA[770] = ZA[767] + ZA[769]; ZA[777] = (ZCh(ZA[773], ZA[768], ZA[763]) + ZA[776]) + ZR26(ZA[773]); ZA[774] = ZMa(ZA[765], ZA[760], ZA[770]) + ZR30(ZA[770]); ZA[492] = ZR25(ZA[471]); ZA[618] = ZA[537] + ZA[504]; ZA[617] = ZA[616] + K[23]; ZA[619] = ZR15(ZA[614]) + ZA[618]; ZA[781] = ZA[763] + ZA[617]; ZA[778] = ZA[760] + ZA[777]; ZA[775] = ZA[772] + ZA[774]; ZA[505] = ZA[492] + ZA[497]; ZA[782] = (ZCh(ZA[778], ZA[773], ZA[768]) + ZA[781]) + ZR26(ZA[778]); ZA[779] = ZMa(ZA[770], ZA[765], ZA[775]) + ZR30(ZA[775]); ZA[621] = ZA[505] + ZA[546]; ZA[620] = ZA[619] + K[24]; ZA[622] = ZR15(ZA[616]) + ZA[621]; ZA[625] = ZR25(ZA[501]); ZA[786] = ZA[768] + ZA[620]; ZA[783] = ZA[765] + ZA[782]; ZA[624] = ZA[554] + ZA[471]; ZA[780] = ZA[777] + ZA[779]; ZA[787] = (ZCh(ZA[783], ZA[778], ZA[773]) + ZA[786]) + ZR26(ZA[783]); ZA[784] = ZMa(ZA[775], ZA[770], ZA[780]) + ZR30(ZA[780]); ZA[493] = ZR25(ZA[474]); ZA[626] = ZA[625] + ZA[624]; ZA[623] = ZA[622] + K[25]; ZA[627] = ZR15(ZA[619]) + ZA[626]; ZA[791] = ZA[773] + ZA[623]; ZA[788] = ZA[770] + ZA[787]; ZA[785] = ZA[782] + ZA[784]; ZA[629] = ZA[493] + ZA[501]; ZA[792] = (ZCh(ZA[788], ZA[783], ZA[778]) + ZA[791]) + ZR26(ZA[788]); ZA[789] = ZMa(ZA[780], ZA[775], ZA[785]) + ZR30(ZA[785]); ZA[630] = ZA[566] + ZA[629]; ZA[628] = ZA[627] + K[26]; ZA[634] = ZR25(ZA[518]) + ZA[474]; ZA[631] = ZR15(ZA[622]) + ZA[630]; ZA[796] = ZA[778] + ZA[628]; ZA[793] = ZA[775] + ZA[792]; ZA[790] = ZA[787] + ZA[789]; ZA[797] = (ZCh(ZA[793], ZA[788], ZA[783]) + ZA[796]) + ZR26(ZA[793]); ZA[794] = ZMa(ZA[785], ZA[780], ZA[790]) + ZR30(ZA[790]); ZA[491] = ZR25(ZA[482]); ZA[635] = ZA[567] + ZA[634]; ZA[633] = ZA[631] + K[27]; ZA[636] = ZR15(ZA[627]) + ZA[635]; ZA[801] = ZA[783] + ZA[633]; ZA[798] = ZA[780] + ZA[797]; ZA[795] = ZA[792] + ZA[794]; ZA[638] = ZA[491] + ZA[518]; ZA[802] = (ZCh(ZA[798], ZA[793], ZA[788]) + ZA[801]) + ZR26(ZA[798]); ZA[799] = ZMa(ZA[790], ZA[785], ZA[795]) + ZR30(ZA[795]); ZA[639] = ZA[638] + ZA[614]; ZA[637] = ZA[636] + K[28]; ZA[642] = ZR25(ZA[528]) + ZA[482]; ZA[640] = ZR15(ZA[631]) + ZA[639]; ZA[806] = ZA[788] + ZA[637]; ZA[803] = ZA[785] + ZA[802]; ZA[800] = ZA[797] + ZA[799]; ZA[807] = (ZCh(ZA[803], ZA[798], ZA[793]) + ZA[806]) + ZR26(ZA[803]); ZA[804] = ZMa(ZA[795], ZA[790], ZA[800]) + ZR30(ZA[800]); ZA[643] = ZA[616] + ZA[642]; ZA[641] = ZA[640] + K[29]; ZA[646] = ZR25(ZA[502]) + ZA[528]; ZA[644] = ZR15(ZA[636]) + ZA[643]; ZA[811] = ZA[793] + ZA[641]; ZA[808] = ZA[790] + ZA[807]; ZA[805] = ZA[802] + ZA[804]; ZA[812] = (ZCh(ZA[808], ZA[803], ZA[798]) + ZA[811]) + ZR26(ZA[808]); ZA[809] = ZMa(ZA[800], ZA[795], ZA[805]) + ZR30(ZA[805]); ZA[647] = ZA[619] + ZA[646]; ZA[645] = ZA[644] + K[30]; ZA[650] = ZR25(ZA[537]) + ZA[502]; ZA[648] = ZR15(ZA[640]) + ZA[647]; ZA[816] = ZA[798] + ZA[645]; ZA[813] = ZA[795] + ZA[812]; ZA[810] = ZA[807] + ZA[809]; ZA[817] = (ZCh(ZA[813], ZA[808], ZA[803]) + ZA[816]) + ZR26(ZA[813]); ZA[814] = ZMa(ZA[805], ZA[800], ZA[810]) + ZR30(ZA[810]); ZA[925] = ZA[622] + ZA[650]; ZA[649] = ZA[648] + K[31]; ZA[653] = ZR25(ZA[546]) + ZA[537]; ZA[651] = ZR15(ZA[644]) + ZA[925]; ZA[821] = ZA[803] + ZA[649]; ZA[818] = ZA[800] + ZA[817]; ZA[815] = ZA[812] + ZA[814]; ZA[822] = (ZCh(ZA[818], ZA[813], ZA[808]) + ZA[821]) + ZR26(ZA[818]); ZA[819] = ZMa(ZA[810], ZA[805], ZA[815]) + ZR30(ZA[815]); ZA[654] = ZA[627] + ZA[653]; ZA[652] = ZA[651] + K[32]; 
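/* Descriptive note: the ZA[...] chains in this stretch appear to be the fully unrolled rounds of
   the second SHA-256 transform (hashing the 32-byte output of the first hash), with ZR15/ZR25
   extending the message schedule, ZCh/ZMa together with ZR26/ZR30 forming the per-round T1/T2
   terms, and K[...] presumably an array of round constants and host-precomputed values. */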
ZA[657] = ZR25(ZA[554]) + ZA[546]; ZA[655] = ZR15(ZA[648]) + ZA[654]; ZA[826] = ZA[808] + ZA[652]; ZA[823] = ZA[805] + ZA[822]; ZA[820] = ZA[817] + ZA[819]; ZA[827] = (ZCh(ZA[823], ZA[818], ZA[813]) + ZA[826]) + ZR26(ZA[823]); ZA[824] = ZMa(ZA[815], ZA[810], ZA[820]) + ZR30(ZA[820]); ZA[658] = ZA[631] + ZA[657]; ZA[656] = ZA[655] + K[33]; ZA[661] = ZR25(ZA[566]) + ZA[554]; ZA[659] = ZR15(ZA[651]) + ZA[658]; ZA[831] = ZA[813] + ZA[656]; ZA[828] = ZA[810] + ZA[827]; ZA[825] = ZA[822] + ZA[824]; ZA[832] = (ZCh(ZA[828], ZA[823], ZA[818]) + ZA[831]) + ZR26(ZA[828]); ZA[829] = ZMa(ZA[820], ZA[815], ZA[825]) + ZR30(ZA[825]); ZA[662] = ZA[636] + ZA[661]; ZA[660] = ZA[659] + K[34]; ZA[665] = ZR25(ZA[567]) + ZA[566]; ZA[663] = ZR15(ZA[655]) + ZA[662]; ZA[836] = ZA[818] + ZA[660]; ZA[833] = ZA[815] + ZA[832]; ZA[830] = ZA[827] + ZA[829]; ZA[837] = (ZCh(ZA[833], ZA[828], ZA[823]) + ZA[836]) + ZR26(ZA[833]); ZA[834] = ZMa(ZA[825], ZA[820], ZA[830]) + ZR30(ZA[830]); ZA[666] = ZA[640] + ZA[665]; ZA[664] = ZA[663] + K[35]; ZA[669] = ZR25(ZA[614]) + ZA[567]; ZA[667] = ZR15(ZA[659]) + ZA[666]; ZA[841] = ZA[823] + ZA[664]; ZA[838] = ZA[820] + ZA[837]; ZA[835] = ZA[832] + ZA[834]; ZA[842] = (ZCh(ZA[838], ZA[833], ZA[828]) + ZA[841]) + ZR26(ZA[838]); ZA[839] = ZMa(ZA[830], ZA[825], ZA[835]) + ZR30(ZA[835]); ZA[670] = ZA[644] + ZA[669]; ZA[668] = ZA[667] + K[36]; ZA[677] = ZR25(ZA[616]) + ZA[614]; ZA[671] = ZR15(ZA[663]) + ZA[670]; ZA[846] = ZA[828] + ZA[668]; ZA[843] = ZA[825] + ZA[842]; ZA[840] = ZA[837] + ZA[839]; ZA[847] = (ZCh(ZA[843], ZA[838], ZA[833]) + ZA[846]) + ZR26(ZA[843]); ZA[844] = ZMa(ZA[835], ZA[830], ZA[840]) + ZR30(ZA[840]); ZA[678] = ZA[648] + ZA[677]; ZA[676] = ZA[671] + K[37]; ZA[682] = ZR25(ZA[619]) + ZA[616]; ZA[679] = ZR15(ZA[667]) + ZA[678]; ZA[851] = ZA[833] + ZA[676]; ZA[848] = ZA[830] + ZA[847]; ZA[845] = ZA[842] + ZA[844]; ZA[852] = (ZCh(ZA[848], ZA[843], ZA[838]) + ZA[851]) + ZR26(ZA[848]); ZA[849] = ZMa(ZA[840], ZA[835], ZA[845]) + ZR30(ZA[845]); ZA[683] = ZA[651] + ZA[682]; ZA[680] = ZA[679] + K[38]; ZA[686] = ZR25(ZA[622]) + ZA[619]; ZA[684] = ZR15(ZA[671]) + ZA[683]; ZA[856] = ZA[838] + ZA[680]; ZA[853] = ZA[835] + ZA[852]; ZA[850] = ZA[847] + ZA[849]; ZA[857] = (ZCh(ZA[853], ZA[848], ZA[843]) + ZA[856]) + ZR26(ZA[853]); ZA[854] = ZMa(ZA[845], ZA[840], ZA[850]) + ZR30(ZA[850]); ZA[687] = ZA[655] + ZA[686]; ZA[685] = ZA[684] + K[39]; ZA[690] = ZR25(ZA[627]) + ZA[622]; ZA[688] = ZR15(ZA[679]) + ZA[687]; ZA[861] = ZA[843] + ZA[685]; ZA[858] = ZA[840] + ZA[857]; ZA[855] = ZA[852] + ZA[854]; ZA[862] = (ZCh(ZA[858], ZA[853], ZA[848]) + ZA[861]) + ZR26(ZA[858]); ZA[859] = ZMa(ZA[850], ZA[845], ZA[855]) + ZR30(ZA[855]); ZA[691] = ZA[659] + ZA[690]; ZA[689] = ZA[688] + K[40]; ZA[694] = ZR25(ZA[631]) + ZA[627]; ZA[692] = ZR15(ZA[684]) + ZA[691]; ZA[866] = ZA[848] + ZA[689]; ZA[863] = ZA[845] + ZA[862]; ZA[860] = ZA[857] + ZA[859]; ZA[867] = (ZCh(ZA[863], ZA[858], ZA[853]) + ZA[866]) + ZR26(ZA[863]); ZA[864] = ZMa(ZA[855], ZA[850], ZA[860]) + ZR30(ZA[860]); ZA[695] = ZA[663] + ZA[694]; ZA[693] = ZA[692] + K[41]; ZA[698] = ZR25(ZA[636]) + ZA[631]; ZA[696] = ZR15(ZA[688]) + ZA[695]; ZA[871] = ZA[853] + ZA[693]; ZA[868] = ZA[850] + ZA[867]; ZA[865] = ZA[862] + ZA[864]; ZA[873] = (ZCh(ZA[868], ZA[863], ZA[858]) + ZA[871]) + ZR26(ZA[868]); ZA[869] = ZMa(ZA[860], ZA[855], ZA[865]) + ZR30(ZA[865]); ZA[699] = ZA[667] + ZA[698]; ZA[697] = ZA[696] + K[42]; ZA[702] = ZR25(ZA[640]) + ZA[636]; ZA[700] = ZR15(ZA[692]) + ZA[699]; ZA[877] = ZA[858] + ZA[697]; ZA[874] = ZA[855] + ZA[873]; ZA[870] = 
ZA[867] + ZA[869]; ZA[878] = (ZCh(ZA[874], ZA[868], ZA[863]) + ZA[877]) + ZR26(ZA[874]); ZA[875] = ZMa(ZA[865], ZA[860], ZA[870]) + ZR30(ZA[870]); ZA[703] = ZA[671] + ZA[702]; ZA[701] = ZA[700] + K[43]; ZA[706] = ZR25(ZA[644]) + ZA[640]; ZA[704] = ZR15(ZA[696]) + ZA[703]; ZA[882] = ZA[863] + ZA[701]; ZA[879] = ZA[860] + ZA[878]; ZA[876] = ZA[873] + ZA[875]; ZA[883] = (ZCh(ZA[879], ZA[874], ZA[868]) + ZA[882]) + ZR26(ZA[879]); ZA[880] = ZMa(ZA[870], ZA[865], ZA[876]) + ZR30(ZA[876]); ZA[707] = ZA[679] + ZA[706]; ZA[705] = ZA[704] + K[44]; ZA[710] = ZR25(ZA[648]) + ZA[644]; ZA[708] = ZR15(ZA[700]) + ZA[707]; ZA[887] = ZA[868] + ZA[705]; ZA[884] = ZA[865] + ZA[883]; ZA[881] = ZA[878] + ZA[880]; ZA[888] = (ZCh(ZA[884], ZA[879], ZA[874]) + ZA[887]) + ZR26(ZA[884]); ZA[885] = ZMa(ZA[876], ZA[870], ZA[881]) + ZR30(ZA[881]); ZA[711] = ZA[684] + ZA[710]; ZA[709] = ZA[708] + K[45]; ZA[714] = ZR25(ZA[651]) + ZA[648]; ZA[712] = ZR15(ZA[704]) + ZA[711]; ZA[892] = ZA[874] + ZA[709]; ZA[889] = ZA[870] + ZA[888]; ZA[886] = ZA[883] + ZA[885]; ZA[893] = (ZCh(ZA[889], ZA[884], ZA[879]) + ZA[892]) + ZR26(ZA[889]); ZA[890] = ZMa(ZA[881], ZA[876], ZA[886]) + ZR30(ZA[886]); ZA[715] = ZA[688] + ZA[714]; ZA[713] = ZA[712] + K[46]; ZA[718] = ZR25(ZA[655]) + ZA[651]; ZA[716] = ZR15(ZA[708]) + ZA[715]; ZA[897] = ZA[879] + ZA[713]; ZA[894] = ZA[876] + ZA[893]; ZA[891] = ZA[888] + ZA[890]; ZA[898] = (ZCh(ZA[894], ZA[889], ZA[884]) + ZA[897]) + ZR26(ZA[894]); ZA[895] = ZMa(ZA[886], ZA[881], ZA[891]) + ZR30(ZA[891]); ZA[719] = ZA[692] + ZA[718]; ZA[717] = ZA[716] + K[47]; ZA[722] = ZR25(ZA[659]) + ZA[655]; ZA[720] = ZR15(ZA[712]) + ZA[719]; ZA[902] = ZA[884] + ZA[717]; ZA[899] = ZA[881] + ZA[898]; ZA[896] = ZA[893] + ZA[895]; ZA[903] = (ZCh(ZA[899], ZA[894], ZA[889]) + ZA[902]) + ZR26(ZA[899]); ZA[900] = ZMa(ZA[891], ZA[886], ZA[896]) + ZR30(ZA[896]); ZA[723] = ZA[696] + ZA[722]; ZA[721] = ZA[720] + K[48]; ZA[672] = ZR25(ZA[663]) + ZA[659]; ZA[724] = ZR15(ZA[716]) + ZA[723]; ZA[907] = ZA[889] + ZA[721]; ZA[904] = ZA[886] + ZA[903]; ZA[901] = ZA[898] + ZA[900]; ZA[908] = (ZCh(ZA[904], ZA[899], ZA[894]) + ZA[907]) + ZR26(ZA[904]); ZA[905] = ZMa(ZA[896], ZA[891], ZA[901]) + ZR30(ZA[901]); ZA[673] = ZR25(ZA[667]) + ZA[663]; ZA[726] = ZA[700] + ZA[672]; ZA[725] = ZA[724] + K[49]; ZA[727] = ZR15(ZA[720]) + ZA[726]; ZA[912] = ZA[894] + ZA[725]; ZA[909] = ZA[891] + ZA[908]; ZA[906] = ZA[903] + ZA[905]; ZA[675] = ZA[667] + K[52]; ZA[729] = ZA[704] + ZA[673]; ZA[913] = (ZCh(ZA[909], ZA[904], ZA[899]) + ZA[912]) + ZR26(ZA[909]); ZA[910] = ZMa(ZA[901], ZA[896], ZA[906]) + ZR30(ZA[906]); ZA[674] = ZR25(ZA[671]) + ZA[675]; ZA[730] = ZR15(ZA[724]) + ZA[729]; ZA[728] = ZA[727] + K[50]; ZA[681] = ZR25(ZA[679]) + ZA[671]; ZA[917] = ZA[899] + ZA[901] + ZA[728]; ZA[914] = ZA[896] + ZA[913]; ZA[911] = ZA[908] + ZA[910]; ZA[732] = ZA[708] + ZA[674]; ZA[731] = ZA[730] + K[51]; ZA[918] = (ZCh(ZA[914], ZA[909], ZA[904]) + ZA[917]) + ZR26(ZA[914]); ZA[915] = ZMa(ZA[906], ZA[901], ZA[911]) + ZR30(ZA[911]); ZA[733] = ZR15(ZA[727]) + ZA[732]; ZA[919] = ZA[906] + ZA[904] + ZA[731]; ZA[734] = ZA[712] + ZA[681]; ZA[920] = (ZCh(ZA[918], ZA[914], ZA[909]) + ZA[919]) + ZR26(ZA[918]); ZA[735] = ZR15(ZA[730]) + ZA[734]; ZA[921] = ZA[911] + ZA[909] + ZA[733]; ZA[916] = ZA[913] + ZA[915]; ZA[922] = (ZCh(ZA[920], ZA[918], ZA[914]) + ZA[921]) + ZR26(ZA[920]); ZA[923] = ZA[916] + ZA[914] + ZA[735]; ZA[924] = (ZCh(ZA[922], ZA[920], ZA[918]) + ZA[923]) + ZR26(ZA[922]); #define FOUND (0x0F) #define SETFOUND(Xnonce) output[output[FOUND]++] = Xnonce #if 
defined(VECTORS4) bool result = any(ZA[924] == K[79]); if (result) { if (ZA[924].x == K[79]) SETFOUND(Znonce.x); if (ZA[924].y == K[79]) SETFOUND(Znonce.y); if (ZA[924].z == K[79]) SETFOUND(Znonce.z); if (ZA[924].w == K[79]) SETFOUND(Znonce.w); } #elif defined(VECTORS2) bool result = any(ZA[924] == K[79]); if (result) { if (ZA[924].x == K[79]) SETFOUND(Znonce.x); if (ZA[924].y == K[79]) SETFOUND(Znonce.y); } #else if (ZA[924] == K[79]) SETFOUND(Znonce); #endif } bfgminer-bfgminer-3.10.0/diakgcn121016.cl000066400000000000000000000741221226556647300175740ustar00rootroot00000000000000// DiaKGCN 27-04-2012 - OpenCL kernel by Diapolo // // Parts and / or ideas for this kernel are based upon the public-domain poclbm project, the phatk kernel by Phateus and the DiabloMiner kernel by DiabloD3. // The kernel was rewritten by me (Diapolo) and is still public-domain! #ifdef VECTORS4 typedef uint4 u; #elif defined VECTORS2 typedef uint2 u; #else typedef uint u; #endif #ifdef BITALIGN #pragma OPENCL EXTENSION cl_amd_media_ops : enable #ifdef BFI_INT #define ch(x, y, z) amd_bytealign(x, y, z) #define ma(x, y, z) amd_bytealign(z ^ x, y, x) #else #define ch(x, y, z) bitselect(z, y, x) #define ma(z, x, y) bitselect(z, y, z ^ x) #endif #else #define ch(x, y, z) (z ^ (x & (y ^ z))) #define ma(x, y, z) ((x & z) | (y & (x | z))) #endif #define rotr15(n) (rotate(n, 15U) ^ rotate(n, 13U) ^ (n >> 10U)) #define rotr25(n) (rotate(n, 25U) ^ rotate(n, 14U) ^ (n >> 3U)) #define rotr26(n) (rotate(n, 26U) ^ rotate(n, 21U) ^ rotate(n, 7U)) #define rotr30(n) (rotate(n, 30U) ^ rotate(n, 19U) ^ rotate(n, 10U)) __kernel __attribute__((reqd_work_group_size(WORKSIZE, 1, 1))) void search( #ifndef GOFFSET const u base, #endif const uint PreVal0, const uint PreVal4, const uint H1, const uint D1A, const uint B1, const uint C1, const uint F1, const uint G1, const uint C1addK5, const uint B1addK6, const uint PreVal0addK7, const uint W16addK16, const uint W17addK17, const uint PreW18, const uint PreW19, const uint W16, const uint W17, const uint PreW31, const uint PreW32, const uint state0, const uint state1, const uint state2, const uint state3, const uint state4, const uint state5, const uint state6, const uint state7, const uint state0A, const uint state0B, const uint state1A, const uint state2A, const uint state3A, const uint state4A, const uint state5A, const uint state6A, const uint state7A, volatile __global uint * output) { u V[8]; u W[16]; #ifdef VECTORS4 const u nonce = (uint)(get_local_id(0)) * 4U + (uint)(get_group_id(0)) * (uint)(WORKVEC) + base; #elif defined VECTORS2 const u nonce = (uint)(get_local_id(0)) * 2U + (uint)(get_group_id(0)) * (uint)(WORKVEC) + base; #else #ifdef GOFFSET const u nonce = (uint)(get_global_id(0)); #else const u nonce = (uint)(get_local_id(0)) + (uint)(get_group_id(0)) * (uint)(WORKSIZE) + base; #endif #endif V[0] = PreVal0 + nonce; V[1] = B1; V[2] = C1; V[3] = D1A; V[4] = PreVal4 + nonce; V[5] = F1; V[6] = G1; V[7] = H1; V[7] += V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += C1addK5 + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = C1addK5 + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += B1addK6 + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = B1addK6 + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += PreVal0addK7 + nonce + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = PreVal0addK7 + nonce + ch(V[5], V[6], V[7]) + rotr26(V[5]) + 
rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0xd807aa98U + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0xd807aa98U + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x12835b01U + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x12835b01U + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x243185beU + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x243185beU + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x550c7dc3U + V[4] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x550c7dc3U + V[4] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x72be5d74U + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x72be5d74U + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x80deb1feU + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x80deb1feU + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x9bdc06a7U + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x9bdc06a7U + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0xc19bf3f4U + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0xc19bf3f4U + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += W16addK16 + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = W16addK16 + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += W17addK17 + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = W17addK17 + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); //---------------------------------------------------------------------------------- #ifdef VECTORS4 W[0] = PreW18 + (u)(rotr25(nonce.x), rotr25(nonce.x) ^ 0x2004000U, rotr25(nonce.x) ^ 0x4008000U, rotr25(nonce.x) ^ 0x600c000U); #elif defined VECTORS2 W[0] = PreW18 + (u)(rotr25(nonce.x), rotr25(nonce.x) ^ 0x2004000U); #else W[0] = PreW18 + rotr25(nonce); #endif W[1] = PreW19 + nonce; W[2] = 0x80000000U + rotr15(W[0]); W[3] = rotr15(W[1]); W[4] = 0x00000280U + rotr15(W[2]); W[5] = W16 + rotr15(W[3]); W[6] = W17 + rotr15(W[4]); W[7] = W[0] + rotr15(W[5]); W[8] = W[1] + rotr15(W[6]); W[9] = W[2] + rotr15(W[7]); W[10] = W[3] + rotr15(W[8]); W[11] = W[4] + rotr15(W[9]); W[12] = W[5] + 0x00a00055U + rotr15(W[10]); W[13] = W[6] + PreW31 + rotr15(W[11]); W[14] = W[7] + PreW32 + rotr15(W[12]); W[15] = W[8] + W17 + rotr15(W[13]) + rotr25(W[0]); V[1] += 0x0fc19dc6U + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + W[0]; V[5] = 0x0fc19dc6U + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + W[0] + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x240ca1ccU + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x240ca1ccU + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x2de92c6fU + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x2de92c6fU + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x4a7484aaU + V[2] + W[3] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x4a7484aaU + V[2] + W[3] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x5cb0a9dcU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x5cb0a9dcU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x76f988daU + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]); 
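// Descriptive note: V[0..7] hold the eight SHA-256 working variables in rotating roles, so each
// round is a pair of statements: the "V[x] += ..." form adds T1 into the register that becomes the
// new e, and the following "V[y] = ..." assignment produces the new a as T1 + T2, with rotr26 and
// rotr30 acting as the big sigma functions of e and a, and ch/ma as Ch/Maj (see the macros above).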
V[0] = 0x76f988daU + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x983e5152U + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x983e5152U + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0xa831c66dU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0xa831c66dU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0xb00327c8U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0xb00327c8U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0xbf597fc7U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0xbf597fc7U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0xc6e00bf3U + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0xc6e00bf3U + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0xd5a79147U + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0xd5a79147U + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x06ca6351U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x06ca6351U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x14292967U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x14292967U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x27b70a85U + V[7] + W[14] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x27b70a85U + V[7] + W[14] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x2e1b2138U + V[6] + W[15] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x2e1b2138U + V[6] + W[15] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); //---------------------------------------------------------------------------------- W[0] = W[0] + W[9] + rotr15(W[14]) + rotr25( W[1]); W[1] = W[1] + W[10] + rotr15(W[15]) + rotr25( W[2]); W[2] = W[2] + W[11] + rotr15( W[0]) + rotr25( W[3]); W[3] = W[3] + W[12] + rotr15( W[1]) + rotr25( W[4]); W[4] = W[4] + W[13] + rotr15( W[2]) + rotr25( W[5]); W[5] = W[5] + W[14] + rotr15( W[3]) + rotr25( W[6]); W[6] = W[6] + W[15] + rotr15( W[4]) + rotr25( W[7]); W[7] = W[7] + W[0] + rotr15( W[5]) + rotr25( W[8]); W[8] = W[8] + W[1] + rotr15( W[6]) + rotr25( W[9]); W[9] = W[9] + W[2] + rotr15( W[7]) + rotr25(W[10]); W[10] = W[10] + W[3] + rotr15( W[8]) + rotr25(W[11]); W[11] = W[11] + W[4] + rotr15( W[9]) + rotr25(W[12]); W[12] = W[12] + W[5] + rotr15(W[10]) + rotr25(W[13]); W[13] = W[13] + W[6] + rotr15(W[11]) + rotr25(W[14]); W[14] = W[14] + W[7] + rotr15(W[12]) + rotr25(W[15]); W[15] = W[15] + W[8] + rotr15(W[13]) + rotr25( W[0]); V[1] += 0x4d2c6dfcU + V[5] + W[0] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x4d2c6dfcU + V[5] + W[0] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x53380d13U + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x53380d13U + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x650a7354U + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x650a7354U + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x766a0abbU + V[2] + W[3] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x766a0abbU + V[2] + W[3] + ch(V[7], 
V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x81c2c92eU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x81c2c92eU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x92722c85U + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x92722c85U + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0xa2bfe8a1U + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0xa2bfe8a1U + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0xa81a664bU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0xa81a664bU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0xc24b8b70U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0xc24b8b70U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0xc76c51a3U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0xc76c51a3U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0xd192e819U + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0xd192e819U + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0xd6990624U + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0xd6990624U + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0xf40e3585U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0xf40e3585U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x106aa070U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x106aa070U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x19a4c116U + V[7] + W[14] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x19a4c116U + V[7] + W[14] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x1e376c08U + V[6] + W[15] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x1e376c08U + V[6] + W[15] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); //---------------------------------------------------------------------------------- W[0] = W[0] + W[9] + rotr15(W[14]) + rotr25( W[1]); W[1] = W[1] + W[10] + rotr15(W[15]) + rotr25( W[2]); W[2] = W[2] + W[11] + rotr15( W[0]) + rotr25( W[3]); W[3] = W[3] + W[12] + rotr15( W[1]) + rotr25( W[4]); W[4] = W[4] + W[13] + rotr15( W[2]) + rotr25( W[5]); W[5] = W[5] + W[14] + rotr15( W[3]) + rotr25( W[6]); W[6] = W[6] + W[15] + rotr15( W[4]) + rotr25( W[7]); W[7] = W[7] + W[0] + rotr15( W[5]) + rotr25( W[8]); W[8] = W[8] + W[1] + rotr15( W[6]) + rotr25( W[9]); W[9] = W[9] + W[2] + rotr15( W[7]) + rotr25(W[10]); W[10] = W[10] + W[3] + rotr15( W[8]) + rotr25(W[11]); W[11] = W[11] + W[4] + rotr15( W[9]) + rotr25(W[12]); W[12] = W[12] + W[5] + rotr15(W[10]) + rotr25(W[13]); W[13] = W[13] + W[6] + rotr15(W[11]) + rotr25(W[14]); V[1] += 0x2748774cU + V[5] + W[0] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x2748774cU + V[5] + W[0] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x34b0bcb5U + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x34b0bcb5U + V[4] + W[1] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x391c0cb3U + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 
0x391c0cb3U + V[3] + W[2] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x4ed8aa4aU + V[2] + W[3] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x4ed8aa4aU + V[2] + W[3] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x5b9cca4fU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x5b9cca4fU + V[1] + W[4] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x682e6ff3U + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x682e6ff3U + V[0] + W[5] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x748f82eeU + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x748f82eeU + V[7] + W[6] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x78a5636fU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x78a5636fU + V[6] + W[7] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x84c87814U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x84c87814U + V[5] + W[8] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x8cc70208U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x8cc70208U + V[4] + W[9] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x90befffaU + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x90befffaU + V[3] + W[10] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0xa4506cebU + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0xa4506cebU + V[2] + W[11] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0xbef9a3f7U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0xbef9a3f7U + V[1] + W[12] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0xc67178f2U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0xc67178f2U + V[0] + W[13] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); //---------------------------------------------------------------------------------- W[0] = state0 + V[0] + rotr25(state1 + V[1]); W[1] = state1 + V[1] + 0x00a00000U + rotr25(state2 + V[2]); W[2] = state2 + V[2] + rotr15(W[0]) + rotr25(state3 + V[3]); W[3] = state3 + V[3] + rotr15(W[1]) + rotr25(state4 + V[4]); W[4] = state4 + V[4] + rotr15(W[2]) + rotr25(state5 + V[5]); W[5] = state5 + V[5] + rotr15(W[3]) + rotr25(state6 + V[6]); W[6] = state6 + V[6] + 0x00000100U + rotr15(W[4]) + rotr25(state7 + V[7]); W[7] = state7 + V[7] + W[0] + 0x11002000U + rotr15(W[5]); W[8] = W[1] + 0x80000000U + rotr15(W[6]); W[9] = W[2] + rotr15(W[7]); W[10] = W[3] + rotr15(W[8]); W[11] = W[4] + rotr15(W[9]); W[12] = W[5] + rotr15(W[10]); W[13] = W[6] + rotr15(W[11]); W[14] = W[7] + 0x00400022U + rotr15(W[12]); W[15] = W[8] + 0x00000100U + rotr15(W[13]) + rotr25(W[0]); // 0x71374491U + 0x1f83d9abU + state1 const u state1AaddV1 = state1A + V[1]; // 0xb5c0fbcfU + 0x9b05688cU + state2 const u state2AaddV2 = state2A + V[2]; // 0x510e527fU + 0xe9b5dba5U + state3 const u state3AaddV3 = state3A + V[3]; // 0x3956c25bU + state4 const u state4AaddV4 = state4A + V[4]; // 0x59f111f1U + state5 const u state5AaddV5 = state5A + V[5]; // 0x923f82a4U + state6 const u state6AaddV6 = state6A + V[6]; // 0xab1c5ed5U + state7 const u state7AaddV7 = state7A + V[7]; // 0x98c7e2a2U + state0 V[3] = state0A + V[0]; // 0xfc08884dU + state0 V[7] = state0B + 
V[0]; V[0] = 0x6a09e667U; V[1] = 0xbb67ae85U; V[2] = 0x3c6ef372U; V[4] = 0x510e527fU; V[5] = 0x9b05688cU; V[6] = 0x1f83d9abU; V[2] += state1AaddV1 + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = state1AaddV1 + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += state2AaddV2 + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = state2AaddV2 + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += state3AaddV3 + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = state3AaddV3 + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += state4AaddV4 + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = state4AaddV4 + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += state5AaddV5 + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = state5AaddV5 + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += state6AaddV6 + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = state6AaddV6 + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += state7AaddV7 + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = state7AaddV7 + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x5807aa98U + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x5807aa98U + V[7] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x12835b01U + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x12835b01U + V[6] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x243185beU + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x243185beU + V[5] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x550c7dc3U + V[4] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x550c7dc3U + V[4] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x72be5d74U + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x72be5d74U + V[3] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x80deb1feU + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x80deb1feU + V[2] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x9bdc06a7U + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x9bdc06a7U + V[1] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0xc19bf274U + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0xc19bf274U + V[0] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0xe49b69c1U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0xe49b69c1U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0xefbe4786U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0xefbe4786U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x0fc19dc6U + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x0fc19dc6U + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x240ca1ccU + V[4] + W[3] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x240ca1ccU + V[4] + W[3] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x2de92c6fU + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x2de92c6fU + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] 
+= 0x4a7484aaU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x4a7484aaU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x5cb0a9dcU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x5cb0a9dcU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x76f988daU + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x76f988daU + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x983e5152U + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x983e5152U + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0xa831c66dU + V[6] + W[9] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0xa831c66dU + V[6] + W[9] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0xb00327c8U + V[5] + W[10] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0xb00327c8U + V[5] + W[10] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0xbf597fc7U + V[4] + W[11] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0xbf597fc7U + V[4] + W[11] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0xc6e00bf3U + V[3] + W[12] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0xc6e00bf3U + V[3] + W[12] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0xd5a79147U + V[2] + W[13] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0xd5a79147U + V[2] + W[13] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x06ca6351U + V[1] + W[14] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x06ca6351U + V[1] + W[14] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x14292967U + V[0] + W[15] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x14292967U + V[0] + W[15] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); //---------------------------------------------------------------------------------- W[0] = W[0] + W[9] + rotr15(W[14]) + rotr25( W[1]); W[1] = W[1] + W[10] + rotr15(W[15]) + rotr25( W[2]); W[2] = W[2] + W[11] + rotr15( W[0]) + rotr25( W[3]); W[3] = W[3] + W[12] + rotr15( W[1]) + rotr25( W[4]); W[4] = W[4] + W[13] + rotr15( W[2]) + rotr25( W[5]); W[5] = W[5] + W[14] + rotr15( W[3]) + rotr25( W[6]); W[6] = W[6] + W[15] + rotr15( W[4]) + rotr25( W[7]); W[7] = W[7] + W[0] + rotr15( W[5]) + rotr25( W[8]); W[8] = W[8] + W[1] + rotr15( W[6]) + rotr25( W[9]); W[9] = W[9] + W[2] + rotr15( W[7]) + rotr25(W[10]); W[10] = W[10] + W[3] + rotr15( W[8]) + rotr25(W[11]); W[11] = W[11] + W[4] + rotr15( W[9]) + rotr25(W[12]); W[12] = W[12] + W[5] + rotr15(W[10]) + rotr25(W[13]); W[13] = W[13] + W[6] + rotr15(W[11]) + rotr25(W[14]); W[14] = W[14] + W[7] + rotr15(W[12]) + rotr25(W[15]); W[15] = W[15] + W[8] + rotr15(W[13]) + rotr25( W[0]); V[3] += 0x27b70a85U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x27b70a85U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x2e1b2138U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x2e1b2138U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x4d2c6dfcU + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0x4d2c6dfcU + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x53380d13U + V[4] + W[3] + ch(V[1], V[2], 
V[3]) + rotr26(V[1]); V[4] = 0x53380d13U + V[4] + W[3] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x650a7354U + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x650a7354U + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x766a0abbU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x766a0abbU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x81c2c92eU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x81c2c92eU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x92722c85U + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x92722c85U + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0xa2bfe8a1U + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0xa2bfe8a1U + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0xa81a664bU + V[6] + W[9] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0xa81a664bU + V[6] + W[9] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0xc24b8b70U + V[5] + W[10] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[5] = 0xc24b8b70U + V[5] + W[10] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0xc76c51a3U + V[4] + W[11] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0xc76c51a3U + V[4] + W[11] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0xd192e819U + V[3] + W[12] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0xd192e819U + V[3] + W[12] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0xd6990624U + V[2] + W[13] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0xd6990624U + V[2] + W[13] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0xf40e3585U + V[1] + W[14] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0xf40e3585U + V[1] + W[14] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x106aa070U + V[0] + W[15] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x106aa070U + V[0] + W[15] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); //---------------------------------------------------------------------------------- W[0] = W[0] + W[9] + rotr15(W[14]) + rotr25( W[1]); W[1] = W[1] + W[10] + rotr15(W[15]) + rotr25( W[2]); W[2] = W[2] + W[11] + rotr15( W[0]) + rotr25( W[3]); W[3] = W[3] + W[12] + rotr15( W[1]) + rotr25( W[4]); W[4] = W[4] + W[13] + rotr15( W[2]) + rotr25( W[5]); W[5] = W[5] + W[14] + rotr15( W[3]) + rotr25( W[6]); W[6] = W[6] + W[15] + rotr15( W[4]) + rotr25( W[7]); W[7] = W[7] + W[0] + rotr15( W[5]) + rotr25( W[8]); W[8] = W[8] + W[1] + rotr15( W[6]) + rotr25( W[9]); W[9] = W[9] + W[2] + rotr15( W[7]) + rotr25(W[10]); W[10] = W[10] + W[3] + rotr15( W[8]) + rotr25(W[11]); W[11] = W[11] + W[4] + rotr15( W[9]) + rotr25(W[12]); W[12] = W[12] + W[5] + rotr15(W[10]) + rotr25(W[13]); V[3] += 0x19a4c116U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x19a4c116U + V[7] + W[0] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x1e376c08U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[6] = 0x1e376c08U + V[6] + W[1] + ch(V[3], V[4], V[5]) + rotr26(V[3]) + rotr30(V[7]) + ma(V[0], V[1], V[7]); V[1] += 0x2748774cU + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]); 
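// Descriptive note: from here the kernel only carries enough state to recover the last word of the
// second hash; the nonce test at the end compares V[7] against 0x136032edU, which effectively folds
// in the omitted round constant 0x90befffaU and the initial hash value 0x5be0cd19U (their sum
// negated mod 2^32), so the check amounts to requiring the final H word to be zero.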
V[5] = 0x2748774cU + V[5] + W[2] + ch(V[2], V[3], V[4]) + rotr26(V[2]) + rotr30(V[6]) + ma(V[7], V[0], V[6]); V[0] += 0x34b0bcb5U + V[4] + W[3] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[4] = 0x34b0bcb5U + V[4] + W[3] + ch(V[1], V[2], V[3]) + rotr26(V[1]) + rotr30(V[5]) + ma(V[6], V[7], V[5]); V[7] += 0x391c0cb3U + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]); V[3] = 0x391c0cb3U + V[3] + W[4] + ch(V[0], V[1], V[2]) + rotr26(V[0]) + rotr30(V[4]) + ma(V[5], V[6], V[4]); V[6] += 0x4ed8aa4aU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]); V[2] = 0x4ed8aa4aU + V[2] + W[5] + ch(V[7], V[0], V[1]) + rotr26(V[7]) + rotr30(V[3]) + ma(V[4], V[5], V[3]); V[5] += 0x5b9cca4fU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]); V[1] = 0x5b9cca4fU + V[1] + W[6] + ch(V[6], V[7], V[0]) + rotr26(V[6]) + rotr30(V[2]) + ma(V[3], V[4], V[2]); V[4] += 0x682e6ff3U + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]); V[0] = 0x682e6ff3U + V[0] + W[7] + ch(V[5], V[6], V[7]) + rotr26(V[5]) + rotr30(V[1]) + ma(V[2], V[3], V[1]); V[3] += 0x748f82eeU + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]); V[7] = 0x748f82eeU + V[7] + W[8] + ch(V[4], V[5], V[6]) + rotr26(V[4]) + rotr30(V[0]) + ma(V[1], V[2], V[0]); V[2] += 0x78a5636fU + V[6] + W[9] + ch(V[3], V[4], V[5]) + rotr26(V[3]); V[1] += 0x84c87814U + V[5] + W[10] + ch(V[2], V[3], V[4]) + rotr26(V[2]); V[0] += 0x8cc70208U + V[4] + W[11] + ch(V[1], V[2], V[3]) + rotr26(V[1]); V[7] += V[3] + W[12] + ch(V[0], V[1], V[2]) + rotr26(V[0]); #define FOUND (0x0F) #define SETFOUND(Xnonce) output[output[FOUND]++] = Xnonce #ifdef VECTORS4 if ((V[7].x == 0x136032edU) ^ (V[7].y == 0x136032edU) ^ (V[7].z == 0x136032edU) ^ (V[7].w == 0x136032edU)) { if (V[7].x == 0x136032edU) SETFOUND(nonce.x); if (V[7].y == 0x136032edU) SETFOUND(nonce.y); if (V[7].z == 0x136032edU) SETFOUND(nonce.z); if (V[7].w == 0x136032edU) SETFOUND(nonce.w); } #elif defined VECTORS2 if ((V[7].x == 0x136032edU) + (V[7].y == 0x136032edU)) { if (V[7].x == 0x136032edU) SETFOUND(nonce.x); if (V[7].y == 0x136032edU) SETFOUND(nonce.y); } #else if (V[7] == 0x136032edU) SETFOUND(nonce); #endif } bfgminer-bfgminer-3.10.0/driver-antminer.c000066400000000000000000000127061226556647300204530ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2013 Nate Woolls * Copyright 2013 Lingchao Xu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include "miner.h" #include "icarus-common.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "deviceapi.h" #include "logging.h" #include "util.h" #define ANTMINER_IO_SPEED 115200 #define ANTMINER_HASH_TIME 0.0000000004761 #define ANTMINER_STATUS_LEN 5 #define ANTMINER_COMMAND_PREFIX 128 #define ANTMINER_COMMAND_LED 1 #define ANTMINER_COMMAND_ON 1 #define ANTMINER_COMMAND_OFFSET 32 BFG_REGISTER_DRIVER(antminer_drv) static bool antminer_detect_one(const char *devpath) { struct device_drv *drv = &antminer_drv; struct ICARUS_INFO *info = calloc(1, sizeof(struct ICARUS_INFO)); if (unlikely(!info)) quit(1, "Failed to malloc ICARUS_INFO"); *info = (struct ICARUS_INFO){ .baud = ANTMINER_IO_SPEED, .Hs = ANTMINER_HASH_TIME, .timing_mode = MODE_DEFAULT, .read_size = 5, }; if (!icarus_detect_custom(devpath, drv, info)) { free(info); return false; } info->read_count = 15; return true; } static bool antminer_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, antminer_detect_one); } static char *antminer_get_clock(struct cgpu_info *cgpu, char *replybuf) { uint8_t rdreg_buf[4] = {0}; unsigned char rebuf[ANTMINER_STATUS_LEN] = {0}; struct timeval tv_now; rdreg_buf[0] = 4; rdreg_buf[0] |= 0x80; rdreg_buf[1] = 0; //16-23 rdreg_buf[2] = 0x04; // 8-15 rdreg_buf[3] = crc5usb(rdreg_buf, 27); applog(LOG_DEBUG, "%"PRIpreprv": Get clock: %02x%02x%02x%02x", cgpu->proc_repr, rdreg_buf[0], rdreg_buf[1], rdreg_buf[2], rdreg_buf[3]); timer_set_now(&tv_now); int err = icarus_write(cgpu->device_fd, rdreg_buf, sizeof(rdreg_buf)); if (err != 0) { sprintf(replybuf, "invalid send get clock: comms error (err=%d)", err); return replybuf; } applog(LOG_DEBUG, "%"PRIpreprv": Get clock: OK", cgpu->proc_repr); memset(rebuf, 0, sizeof(rebuf)); err = icarus_gets(rebuf, cgpu->device_fd, &tv_now, NULL, 10, ANTMINER_STATUS_LEN); // Timeout is ok - checking specifically for an error here if (err == ICA_GETS_ERROR) { sprintf(replybuf, "invalid recv get clock: comms error (err=%d)", err); return replybuf; } applog(LOG_DEBUG, "%"PRIpreprv": Get clock: %02x%02x%02x%02x%02x", cgpu->proc_repr, rebuf[0], rebuf[1], rebuf[2], rebuf[3], rebuf[4]); return NULL; } static char *antminer_set_clock(struct cgpu_info *cgpu, char *setting, char *replybuf) { if (!setting || !*setting) return "missing clock setting"; // For now we only allow hex values that use BITMAINtech's lookup table // This means values should be prefixed with an x so that later we can // accept and distinguish decimal values if (setting[0] != 'x') { sprintf(replybuf, "invalid clock: '%s' data must be prefixed with an x", setting); return replybuf; } //remove leading character char *hex_setting = setting + 1; uint8_t reg_data[4] = {0}; if (!hex2bin(reg_data, hex_setting, strlen(hex_setting) / 2)) { sprintf(replybuf, "invalid clock: '%s' data must be a hexidecimal value", hex_setting); return replybuf; } uint8_t cmd_buf[4] = {0}; cmd_buf[0] = 2; cmd_buf[0] |= 0x80; cmd_buf[1] = reg_data[0]; //16-23 cmd_buf[2] = reg_data[1]; // 8-15 cmd_buf[3] = crc5usb(cmd_buf, 27); applog(LOG_DEBUG, "%"PRIpreprv": Set clock: %02x%02x%02x%02x", cgpu->proc_repr, cmd_buf[0], cmd_buf[1], cmd_buf[2], cmd_buf[3]); int err = icarus_write(cgpu->device_fd, cmd_buf, sizeof(cmd_buf)); if (err != 0) { sprintf(replybuf, "invalid send clock: '%s' comms error (err=%d)", setting, err); return replybuf; } applog(LOG_DEBUG, "%"PRIpreprv": Set clock: OK", cgpu->proc_repr); // This is confirmed 
required in order for the clock change to "take" cgsleep_ms(500); return antminer_get_clock(cgpu, replybuf); } static char *antminer_set_device(struct cgpu_info *cgpu, char *option, char *setting, char *replybuf) { if (strcasecmp(option, "clock") == 0) { return antminer_set_clock(cgpu, setting, replybuf); } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } static void antminer_flash_led(const struct cgpu_info *antminer) { const int offset = ANTMINER_COMMAND_OFFSET; uint8_t cmd_buf[4 + offset]; memset(cmd_buf, 0, sizeof(cmd_buf)); cmd_buf[offset + 0] = ANTMINER_COMMAND_PREFIX; cmd_buf[offset + 1] = ANTMINER_COMMAND_LED; cmd_buf[offset + 2] = ANTMINER_COMMAND_ON; cmd_buf[offset + 3] = crc5usb(cmd_buf, sizeof(cmd_buf)); const int fd = antminer->device_fd; icarus_write(fd, (char *)(&cmd_buf), sizeof(cmd_buf)); } static bool antminer_identify(struct cgpu_info *antminer) { for (int i = 0; i < 10; i++) { antminer_flash_led(antminer); cgsleep_ms(250); } return true; } static void antminer_drv_init() { antminer_drv = icarus_drv; antminer_drv.dname = "antminer"; antminer_drv.name = "AMU"; antminer_drv.lowl_probe = antminer_lowl_probe; antminer_drv.set_device = antminer_set_device, antminer_drv.identify_device = antminer_identify; ++antminer_drv.probe_priority; } struct device_drv antminer_drv = { .drv_init = antminer_drv_init, }; bfgminer-bfgminer-3.10.0/driver-avalon.c000066400000000000000000000612261226556647300201170ustar00rootroot00000000000000/* * Copyright 2012-2013 Xiangfu * Copyright 2013 Con Kolivas * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #include #include #include #ifndef O_CLOEXEC #define O_CLOEXEC 0 #endif #else #include "compat.h" #include #include #endif #include "deviceapi.h" #include "miner.h" #include "driver-avalon.h" #include "logging.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "util.h" BFG_REGISTER_DRIVER(avalon_drv) static int option_offset = -1; static int avalon_init_task(struct avalon_task *at, uint8_t reset, uint8_t ff, uint8_t fan, uint8_t timeout, uint8_t asic_num, uint8_t miner_num, uint8_t nonce_elf, uint8_t gate_miner, int frequency) { uint8_t *buf; static bool first = true; if (unlikely(!at)) return -1; if (unlikely(timeout <= 0 || asic_num <= 0 || miner_num <= 0)) return -1; memset(at, 0, sizeof(struct avalon_task)); if (unlikely(reset)) { at->reset = 1; at->fan_eft = 1; at->timer_eft = 1; first = true; } at->flush_fifo = (ff ? 1 : 0); at->fan_eft = (fan ? 1 : 0); if (unlikely(first && !at->reset)) { at->fan_eft = 1; at->timer_eft = 1; first = false; } at->fan_pwm_data = (fan ? 
fan : AVALON_DEFAULT_FAN_MAX_PWM); at->timeout_data = timeout; at->asic_num = asic_num; at->miner_num = miner_num; at->nonce_elf = nonce_elf; at->gate_miner_elf = 1; at->asic_pll = 1; if (unlikely(gate_miner)) { at-> gate_miner = 1; at->asic_pll = 0; } buf = (uint8_t *)at; buf[5] = 0x00; buf[8] = 0x74; buf[9] = 0x01; buf[10] = 0x00; buf[11] = 0x00; switch (frequency) { case 256: buf[6] = 0x03; buf[7] = 0x08; break; default: case 270: buf[6] = 0x73; buf[7] = 0x08; break; case 282: buf[6] = 0xd3; buf[7] = 0x08; break; case 300: buf[6] = 0x63; buf[7] = 0x09; break; case 325: buf[6] = 0x28; buf[7] = 0x0a; break; case 350: buf[6] = 0xf0; buf[7] = 0x0a; break; case 375: buf[6] = 0xb8; buf[7] = 0x0b; break; } return 0; } static inline void avalon_create_task(struct avalon_task *at, struct work *work) { memcpy(at->midstate, work->midstate, 32); memcpy(at->data, work->data + 64, 12); } static int avalon_send_task(int fd, const struct avalon_task *at, struct cgpu_info *avalon) { size_t ret; int full; struct timespec p; uint8_t buf[AVALON_WRITE_SIZE + 4 * AVALON_DEFAULT_ASIC_NUM]; size_t nr_len; struct avalon_info *info; uint64_t delay = 32000000; /* Default 32ms for B19200 */ uint32_t nonce_range; int i; if (at->nonce_elf) nr_len = AVALON_WRITE_SIZE + 4 * at->asic_num; else nr_len = AVALON_WRITE_SIZE; memcpy(buf, at, AVALON_WRITE_SIZE); if (at->nonce_elf) { nonce_range = (uint32_t)0xffffffff / at->asic_num; for (i = 0; i < at->asic_num; i++) { buf[AVALON_WRITE_SIZE + (i * 4) + 3] = (i * nonce_range & 0xff000000) >> 24; buf[AVALON_WRITE_SIZE + (i * 4) + 2] = (i * nonce_range & 0x00ff0000) >> 16; buf[AVALON_WRITE_SIZE + (i * 4) + 1] = (i * nonce_range & 0x0000ff00) >> 8; buf[AVALON_WRITE_SIZE + (i * 4) + 0] = (i * nonce_range & 0x000000ff) >> 0; } } #if defined(__BIG_ENDIAN__) || defined(MIPSEB) uint8_t tt = 0; tt = (buf[0] & 0x0f) << 4; tt |= ((buf[0] & 0x10) ? (1 << 3) : 0); tt |= ((buf[0] & 0x20) ? (1 << 2) : 0); tt |= ((buf[0] & 0x40) ? (1 << 1) : 0); tt |= ((buf[0] & 0x80) ? (1 << 0) : 0); buf[0] = tt; tt = (buf[4] & 0x0f) << 4; tt |= ((buf[4] & 0x10) ? (1 << 3) : 0); tt |= ((buf[4] & 0x20) ? (1 << 2) : 0); tt |= ((buf[4] & 0x40) ? (1 << 1) : 0); tt |= ((buf[4] & 0x80) ? (1 << 0) : 0); buf[4] = tt; #endif if (likely(avalon)) { info = avalon->device_data; delay = nr_len * 10 * 1000000000ULL; delay = delay / info->baud; } if (at->reset) nr_len = 1; if (opt_debug) { applog(LOG_DEBUG, "Avalon: Sent(%u):", (unsigned int)nr_len); hexdump((uint8_t *)buf, nr_len); } ret = write(fd, buf, nr_len); if (unlikely(ret != nr_len)) return AVA_SEND_ERROR; p.tv_sec = 0; p.tv_nsec = (long)delay + 4000000; nanosleep(&p, NULL); applog(LOG_DEBUG, "Avalon: Sent: Buffer delay: %ld", p.tv_nsec); full = avalon_buffer_full(fd); applog(LOG_DEBUG, "Avalon: Sent: Buffer full: %s", ((full == AVA_BUFFER_FULL) ? 
"Yes" : "No")); if (unlikely(full == AVA_BUFFER_FULL)) return AVA_SEND_BUFFER_FULL; return AVA_SEND_BUFFER_EMPTY; } static inline int avalon_gets(int fd, uint8_t *buf, int read_count, struct thr_info *thr, struct timeval *tv_finish) { ssize_t ret = 0; int rc = 0; int read_amount = AVALON_READ_SIZE; bool first = true; /* Read reply 1 byte at a time to get earliest tv_finish */ while (true) { ret = read(fd, buf, 1); if (ret < 0) { applog(LOG_ERR, "Avalon: Error on read in avalon_gets: %s", bfg_strerror(errno, BST_ERRNO)); return AVA_GETS_ERROR; } if (first && likely(tv_finish)) cgtime(tv_finish); if (ret >= read_amount) return AVA_GETS_OK; if (ret > 0) { buf += ret; read_amount -= ret; first = false; continue; } if (thr && thr->work_restart) { if (opt_debug) { applog(LOG_WARNING, "Avalon: Work restart at %.2f seconds", (float)(rc)/(float)AVALON_TIME_FACTOR); } return AVA_GETS_RESTART; } rc++; if (rc >= read_count) { if (opt_debug) { applog(LOG_WARNING, "Avalon: No data in %.2f seconds", (float)rc/(float)AVALON_TIME_FACTOR); } return AVA_GETS_TIMEOUT; } } } static int avalon_get_result(int fd, struct avalon_result *ar, struct thr_info *thr, struct timeval *tv_finish) { struct cgpu_info *avalon; struct avalon_info *info; uint8_t result[AVALON_READ_SIZE]; int ret, read_count; avalon = thr->cgpu; info = avalon->device_data; read_count = info->read_count; memset(result, 0, AVALON_READ_SIZE); ret = avalon_gets(fd, result, read_count, thr, tv_finish); if (ret == AVA_GETS_OK) { if (opt_debug) { applog(LOG_DEBUG, "Avalon: get:"); hexdump((uint8_t *)result, AVALON_READ_SIZE); } memcpy((uint8_t *)ar, result, AVALON_READ_SIZE); } return ret; } static bool avalon_decode_nonce(struct thr_info *thr, struct avalon_result *ar, uint32_t *nonce) { struct cgpu_info *avalon; struct avalon_info *info; struct work *work; avalon = thr->cgpu; if (unlikely(!avalon->works)) return false; work = clone_queued_work_bymidstate(avalon, (char *)ar->midstate, 32, (char *)ar->data, 64, 12); if (!work) return false; info = avalon->device_data; info->matching_work[work->subid]++; *nonce = htole32(ar->nonce); submit_nonce(thr, work, *nonce); free_work(work); return true; } static void avalon_get_reset(int fd, struct avalon_result *ar) { int ret; const int read_count = AVALON_RESET_FAULT_DECISECONDS * AVALON_TIME_FACTOR; memset(ar, 0, AVALON_READ_SIZE); ret = avalon_gets(fd, (uint8_t*)ar, read_count, NULL, NULL); if (ret == AVA_GETS_OK && opt_debug) { applog(LOG_DEBUG, "Avalon: get:"); hexdump((uint8_t *)ar, AVALON_READ_SIZE); } } static int avalon_reset(int fd, struct avalon_result *ar) { struct avalon_task at; uint8_t *buf; int ret, i = 0; struct timespec p; avalon_init_task(&at, 1, 0, AVALON_DEFAULT_FAN_MAX_PWM, AVALON_DEFAULT_TIMEOUT, AVALON_DEFAULT_ASIC_NUM, AVALON_DEFAULT_MINER_NUM, 0, 0, AVALON_DEFAULT_FREQUENCY); ret = avalon_send_task(fd, &at, NULL); if (ret == AVA_SEND_ERROR) return 1; avalon_get_reset(fd, ar); buf = (uint8_t *)ar; /* Sometimes there is one extra 0 byte for some reason in the buffer, * so work around it. */ if (buf[0] == 0) buf = (uint8_t *)(ar + 1); if (buf[0] == 0xAA && buf[1] == 0x55 && buf[2] == 0xAA && buf[3] == 0x55) { for (i = 4; i < 11; i++) if (buf[i] != 0) break; } p.tv_sec = 0; p.tv_nsec = AVALON_RESET_PITCH; nanosleep(&p, NULL); if (i != 11) { applog(LOG_ERR, "Avalon: Reset failed! not an Avalon?" 
" (%d: %02x %02x %02x %02x)", i, buf[0], buf[1], buf[2], buf[3]); /* FIXME: return 1; */ } else applog(LOG_WARNING, "Avalon: Reset succeeded"); return 0; } static void avalon_idle(struct cgpu_info *avalon) { int i, ret; struct avalon_task at; int fd = avalon->device_fd; struct avalon_info *info = avalon->device_data; int avalon_get_work_count = info->miner_count; i = 0; while (true) { avalon_init_task(&at, 0, 0, info->fan_pwm, info->timeout, info->asic_count, info->miner_count, 1, 1, info->frequency); ret = avalon_send_task(fd, &at, avalon); if (unlikely(ret == AVA_SEND_ERROR || (ret == AVA_SEND_BUFFER_EMPTY && (i + 1 == avalon_get_work_count * 2)))) { applog(LOG_ERR, "AVA%i: Comms error", avalon->device_id); return; } if (i + 1 == avalon_get_work_count * 2) break; if (ret == AVA_SEND_BUFFER_FULL) break; i++; } applog(LOG_ERR, "Avalon: Goto idle mode"); } static void get_options(int this_option_offset, int *baud, int *miner_count, int *asic_count, int *timeout, int *frequency) { char buf[BUFSIZ+1]; char *ptr, *comma, *colon, *colon2, *colon3, *colon4; size_t max; int i, tmp; if (opt_avalon_options == NULL) buf[0] = '\0'; else { ptr = opt_avalon_options; for (i = 0; i < this_option_offset; i++) { comma = strchr(ptr, ','); if (comma == NULL) break; ptr = comma + 1; } comma = strchr(ptr, ','); if (comma == NULL) max = strlen(ptr); else max = comma - ptr; if (max > BUFSIZ) max = BUFSIZ; strncpy(buf, ptr, max); buf[max] = '\0'; } *baud = AVALON_IO_SPEED; *miner_count = AVALON_DEFAULT_MINER_NUM - 8; *asic_count = AVALON_DEFAULT_ASIC_NUM; *timeout = AVALON_DEFAULT_TIMEOUT; *frequency = AVALON_DEFAULT_FREQUENCY; if (!(*buf)) return; colon = strchr(buf, ':'); if (colon) *(colon++) = '\0'; tmp = atoi(buf); if (!valid_baud(*baud = tmp)) quit(1, "Invalid avalon-options for baud (%s)", buf); if (colon && *colon) { colon2 = strchr(colon, ':'); if (colon2) *(colon2++) = '\0'; if (*colon) { tmp = atoi(colon); if (tmp > 0 && tmp <= AVALON_DEFAULT_MINER_NUM) { *miner_count = tmp; } else { quit(1, "Invalid avalon-options for " "miner_count (%s) must be 1 ~ %d", colon, AVALON_DEFAULT_MINER_NUM); } } if (colon2 && *colon2) { colon3 = strchr(colon2, ':'); if (colon3) *(colon3++) = '\0'; tmp = atoi(colon2); if (tmp > 0 && tmp <= AVALON_DEFAULT_ASIC_NUM) *asic_count = tmp; else { quit(1, "Invalid avalon-options for " "asic_count (%s) must be 1 ~ %d", colon2, AVALON_DEFAULT_ASIC_NUM); } if (colon3 && *colon3) { colon4 = strchr(colon3, ':'); if (colon4) *(colon4++) = '\0'; tmp = atoi(colon3); if (tmp > 0 && tmp <= 0xff) *timeout = tmp; else { quit(1, "Invalid avalon-options for " "timeout (%s) must be 1 ~ %d", colon3, 0xff); } if (colon4 && *colon4) { tmp = atoi(colon4); switch (tmp) { case 256: case 270: case 282: case 300: case 325: case 350: case 375: *frequency = tmp; break; default: quit(1, "Invalid avalon-options for " "frequency must be 256/270/282/300/325/350/375"); } } } } } } /* Non blocking clearing of anything in the buffer */ static void avalon_clear_readbuf(int fd) { ssize_t ret; do { char buf[AVALON_FTDI_READSIZE]; #ifndef WIN32 struct timeval timeout; fd_set rd; timeout.tv_sec = timeout.tv_usec = 0; FD_ZERO(&rd); FD_SET((SOCKETTYPE)fd, &rd); ret = select(fd + 1, &rd, NULL, NULL, &timeout); if (ret > 0) #endif // Relies on serial timeout for Windows ret = read(fd, buf, AVALON_FTDI_READSIZE); } while (ret > 0); } static bool avalon_detect_one(const char *devpath) { struct avalon_info *info; struct avalon_result ar; int fd, ret; int baud, miner_count, asic_count, timeout, frequency = 0; struct 
cgpu_info *avalon; if (serial_claim(devpath, &avalon_drv)) return false; int this_option_offset = ++option_offset; get_options(this_option_offset, &baud, &miner_count, &asic_count, &timeout, &frequency); applog(LOG_DEBUG, "Avalon Detect: Attempting to open %s " "(baud=%d miner_count=%d asic_count=%d timeout=%d frequency=%d)", devpath, baud, miner_count, asic_count, timeout, frequency); fd = avalon_open2(devpath, baud, true); if (unlikely(fd == -1)) { applog(LOG_ERR, "Avalon Detect: Failed to open %s", devpath); return false; } avalon_clear_readbuf(fd); /* We have a real Avalon! */ avalon = calloc(1, sizeof(struct cgpu_info)); avalon->drv = &avalon_drv; avalon->device_path = strdup(devpath); avalon->device_fd = fd; avalon->threads = AVALON_MINER_THREADS; add_cgpu(avalon); ret = avalon_reset(fd, &ar); if (ret) { ; /* FIXME: I think IT IS avalon and wait on reset; * avalon_close(fd); * return false; */ } applog(LOG_INFO, "Avalon Detect: Found at %s, mark as %d", devpath, avalon->device_id); avalon->device_data = calloc(sizeof(struct avalon_info), 1); if (unlikely(!(avalon->device_data))) quit(1, "Failed to malloc avalon_info data"); info = avalon->device_data; info->baud = baud; info->miner_count = miner_count; info->asic_count = asic_count; info->timeout = timeout; info->read_count = ((float)info->timeout * AVALON_HASH_TIME_FACTOR * AVALON_TIME_FACTOR) / (float)info->miner_count; info->fan_pwm = AVALON_DEFAULT_FAN_MIN_PWM; info->temp_max = 0; /* This is for check the temp/fan every 3~4s */ info->temp_history_count = (4 / (float)((float)info->timeout * ((float)1.67/0x32))) + 1; if (info->temp_history_count <= 0) info->temp_history_count = 1; info->temp_history_index = 0; info->temp_sum = 0; info->temp_old = 0; info->frequency = frequency; /* Set asic to idle mode after detect */ avalon_idle(avalon); avalon->device_fd = -1; avalon_close(fd); return true; } static bool avalon_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, avalon_detect_one); } static void __avalon_init(struct cgpu_info *avalon) { applog(LOG_INFO, "Avalon: Opened on %s", avalon->device_path); } static void avalon_init(struct cgpu_info *avalon) { struct avalon_info *info = avalon->device_data; struct avalon_result ar; int fd, ret; avalon->device_fd = -1; fd = avalon_open(avalon->device_path, info->baud); if (unlikely(fd == -1)) { applog(LOG_ERR, "Avalon: Failed to open on %s", avalon->device_path); return; } ret = avalon_reset(fd, &ar); if (ret) { avalon_close(fd); return; } avalon->device_fd = fd; __avalon_init(avalon); } static bool avalon_prepare(struct thr_info *thr) { struct cgpu_info *avalon = thr->cgpu; struct avalon_info *info = avalon->device_data; free(avalon->works); avalon->works = calloc(info->miner_count * sizeof(struct work *), AVALON_ARRAY_SIZE); if (!avalon->works) quithere(1, "Failed to calloc avalon works"); if (avalon->device_fd == -1) avalon_init(avalon); else __avalon_init(avalon); avalon->status = LIFE_INIT2; return true; } static void avalon_free_work(struct thr_info *thr) { struct cgpu_info *avalon; struct avalon_info *info; struct work **works; int i; avalon = thr->cgpu; avalon->queued = 0; if (unlikely(!avalon->works)) return; works = avalon->works; info = avalon->device_data; for (i = 0; i < info->miner_count * 4; i++) { if (works[i]) { work_completed(avalon, works[i]); works[i] = NULL; } } } static void do_avalon_close(struct thr_info *thr) { struct avalon_result ar; struct cgpu_info *avalon = thr->cgpu; struct avalon_info *info = avalon->device_data; 
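/* Tear-down sequence: drop any queued work, give the controller a moment to
 * settle, reset it, switch the ASICs to idle, and close the port so that a
 * later avalon_init() starts from a clean state. */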
avalon_free_work(thr); cgsleep_ms(1000); avalon_reset(avalon->device_fd, &ar); avalon_idle(avalon); avalon_close(avalon->device_fd); avalon->device_fd = -1; info->no_matching_work = 0; } static inline void record_temp_fan(struct avalon_info *info, struct avalon_result *ar, float *temp_avg) { info->fan0 = ar->fan0 * AVALON_FAN_FACTOR; info->fan1 = ar->fan1 * AVALON_FAN_FACTOR; info->fan2 = ar->fan2 * AVALON_FAN_FACTOR; info->temp0 = ar->temp0; info->temp1 = ar->temp1; info->temp2 = ar->temp2; if (ar->temp0 & 0x80) { ar->temp0 &= 0x7f; info->temp0 = 0 - ((~ar->temp0 & 0x7f) + 1); } if (ar->temp1 & 0x80) { ar->temp1 &= 0x7f; info->temp1 = 0 - ((~ar->temp1 & 0x7f) + 1); } if (ar->temp2 & 0x80) { ar->temp2 &= 0x7f; info->temp2 = 0 - ((~ar->temp2 & 0x7f) + 1); } *temp_avg = info->temp2 > info->temp1 ? info->temp2 : info->temp1; if (info->temp0 > info->temp_max) info->temp_max = info->temp0; if (info->temp1 > info->temp_max) info->temp_max = info->temp1; if (info->temp2 > info->temp_max) info->temp_max = info->temp2; } static inline void adjust_fan(struct avalon_info *info) { int temp_new; temp_new = info->temp_sum / info->temp_history_count; if (temp_new < 35) { info->fan_pwm = AVALON_DEFAULT_FAN_MIN_PWM; info->temp_old = temp_new; } else if (temp_new > 55) { info->fan_pwm = AVALON_DEFAULT_FAN_MAX_PWM; info->temp_old = temp_new; } else if (abs(temp_new - info->temp_old) >= 2) { info->fan_pwm = AVALON_DEFAULT_FAN_MIN_PWM + (temp_new - 35) * 6.4; info->temp_old = temp_new; } } /* We use a replacement algorithm to only remove references to work done from * the buffer when we need the extra space for new work. */ static bool avalon_fill(struct cgpu_info *avalon) { struct avalon_info *info = avalon->device_data; int subid, slot, mc; struct work *work; mc = info->miner_count; if (avalon->queued >= mc) return true; work = get_queued(avalon); if (unlikely(!work)) return false; subid = avalon->queued++; work->subid = subid; slot = avalon->work_array * mc + subid; if (likely(avalon->works[slot])) work_completed(avalon, avalon->works[slot]); avalon->works[slot] = work; if (avalon->queued >= mc) return true; return false; } static void avalon_rotate_array(struct cgpu_info *avalon) { avalon->queued = 0; if (++avalon->work_array >= AVALON_ARRAY_SIZE) avalon->work_array = 0; } static int64_t avalon_scanhash(struct thr_info *thr) { struct cgpu_info *avalon; struct work **works; int fd, ret = AVA_GETS_OK, full; struct avalon_info *info; struct avalon_task at; struct avalon_result ar; int i; int avalon_get_work_count; int start_count, end_count; struct timeval tv_start, tv_finish, elapsed; uint32_t nonce; int64_t hash_count; static int first_try = 0; int result_wrong; avalon = thr->cgpu; works = avalon->works; info = avalon->device_data; avalon_get_work_count = info->miner_count; if (unlikely(avalon->device_fd == -1)) { if (!avalon_prepare(thr)) { applog(LOG_ERR, "AVA%i: Comms error(open)", avalon->device_id); dev_error(avalon, REASON_DEV_COMMS_ERROR); /* fail the device if the reopen attempt fails */ return -1; } } fd = avalon->device_fd; #ifndef WIN32 tcflush(fd, TCOFLUSH); #endif start_count = avalon->work_array * avalon_get_work_count; end_count = start_count + avalon_get_work_count; i = start_count; while (true) { avalon_init_task(&at, 0, 0, info->fan_pwm, info->timeout, info->asic_count, info->miner_count, 1, 0, info->frequency); avalon_create_task(&at, works[i]); ret = avalon_send_task(fd, &at, avalon); if (unlikely(ret == AVA_SEND_ERROR || (ret == AVA_SEND_BUFFER_EMPTY && (i + 1 == end_count) && 
first_try))) { do_avalon_close(thr); applog(LOG_ERR, "AVA%i: Comms error(buffer)", avalon->device_id); dev_error(avalon, REASON_DEV_COMMS_ERROR); first_try = 0; cgsleep_ms(1000); avalon_init(avalon); return 0; /* This should never happen */ } if (ret == AVA_SEND_BUFFER_EMPTY && (i + 1 == end_count)) { first_try = 1; avalon_rotate_array(avalon); return 0xffffffff; } works[i]->blk.nonce = 0xffffffff; if (ret == AVA_SEND_BUFFER_FULL) break; i++; } if (unlikely(first_try)) first_try = 0; elapsed.tv_sec = elapsed.tv_usec = 0; cgtime(&tv_start); result_wrong = 0; hash_count = 0; while (true) { full = avalon_buffer_full(fd); applog(LOG_DEBUG, "Avalon: Buffer full: %s", ((full == AVA_BUFFER_FULL) ? "Yes" : "No")); if (unlikely(full == AVA_BUFFER_EMPTY)) break; ret = avalon_get_result(fd, &ar, thr, &tv_finish); if (unlikely(ret == AVA_GETS_ERROR)) { do_avalon_close(thr); applog(LOG_ERR, "AVA%i: Comms error(read)", avalon->device_id); dev_error(avalon, REASON_DEV_COMMS_ERROR); return 0; } if (unlikely(ret == AVA_GETS_RESTART)) break; if (unlikely(ret == AVA_GETS_TIMEOUT)) { timersub(&tv_finish, &tv_start, &elapsed); applog(LOG_DEBUG, "Avalon: no nonce in (%ld.%06lds)", (long)elapsed.tv_sec, (long)elapsed.tv_usec); continue; } if (!avalon_decode_nonce(thr, &ar, &nonce)) { info->no_matching_work++; result_wrong++; if (unlikely(result_wrong >= avalon_get_work_count)) break; if (opt_debug) { timersub(&tv_finish, &tv_start, &elapsed); applog(LOG_DEBUG,"Avalon: no matching work: %d" " (%ld.%06lds)", info->no_matching_work, (long)elapsed.tv_sec, (long)elapsed.tv_usec); } continue; } hash_count += 0xffffffff; if (opt_debug) { timersub(&tv_finish, &tv_start, &elapsed); applog(LOG_DEBUG, "Avalon: nonce = 0x%08"PRIx32" = 0x%08"PRIx64" hashes " "(%ld.%06lds)", nonce, (uint64_t)hash_count, (long)elapsed.tv_sec, (long)elapsed.tv_usec); } } if (hash_count && avalon->results < AVALON_ARRAY_SIZE) avalon->results++; if (unlikely((result_wrong >= avalon_get_work_count) || (!hash_count && ret != AVA_GETS_RESTART && --avalon->results < 0))) { /* Look for all invalid results, or consecutive failure * to generate any results suggesting the FPGA * controller has screwed up. 
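* If so, close and reset the device, then reinitialise it after a short pause.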
*/ do_avalon_close(thr); applog(LOG_ERR, "AVA%i: FPGA controller messed up, %d wrong results", avalon->device_id, result_wrong); dev_error(avalon, REASON_DEV_COMMS_ERROR); cgsleep_ms(1000); avalon_init(avalon); return 0; } avalon_rotate_array(avalon); if (hash_count) { record_temp_fan(info, &ar, &(avalon->temp)); avalon->temp = info->temp_max; applog(LOG_INFO, "Avalon: Fan1: %d/m, Fan2: %d/m, Fan3: %d/m\t" "Temp1: %dC, Temp2: %dC, Temp3: %dC, TempMAX: %dC", info->fan0, info->fan1, info->fan2, info->temp0, info->temp1, info->temp2, info->temp_max); info->temp_history_index++; info->temp_sum += avalon->temp; applog(LOG_DEBUG, "Avalon: temp_index: %d, temp_count: %d, temp_old: %d", info->temp_history_index, info->temp_history_count, info->temp_old); if (info->temp_history_index == info->temp_history_count) { adjust_fan(info); info->temp_history_index = 0; info->temp_sum = 0; } } /* This hashmeter is just a utility counter based on returned shares */ return hash_count; } static struct api_data *avalon_api_stats(struct cgpu_info *cgpu) { struct api_data *root = NULL; struct avalon_info *info = cgpu->device_data; int i; root = api_add_int(root, "baud", &(info->baud), false); root = api_add_int(root, "miner_count", &(info->miner_count),false); root = api_add_int(root, "asic_count", &(info->asic_count), false); root = api_add_int(root, "read_count", &(info->read_count), false); root = api_add_int(root, "timeout", &(info->timeout), false); root = api_add_int(root, "frequency", &(info->frequency), false); root = api_add_int(root, "fan1", &(info->fan0), false); root = api_add_int(root, "fan2", &(info->fan1), false); root = api_add_int(root, "fan3", &(info->fan2), false); root = api_add_int(root, "temp1", &(info->temp0), false); root = api_add_int(root, "temp2", &(info->temp1), false); root = api_add_int(root, "temp3", &(info->temp2), false); root = api_add_int(root, "temp_max", &(info->temp_max), false); root = api_add_int(root, "no_matching_work", &(info->no_matching_work), false); for (i = 0; i < info->miner_count; i++) { char mcw[24]; sprintf(mcw, "match_work_count%d", i + 1); root = api_add_int(root, mcw, &(info->matching_work[i]), false); } return root; } static void avalon_shutdown(struct thr_info *thr) { do_avalon_close(thr); } struct device_drv avalon_drv = { .dname = "avalon", .name = "AVA", .lowl_probe_by_name_only = true, .lowl_probe = avalon_lowl_probe, .thread_prepare = avalon_prepare, .minerloop = hash_queued_work, .queue_full = avalon_fill, .scanwork = avalon_scanhash, .get_api_stats = avalon_api_stats, .reinit_device = avalon_init, .thread_shutdown = avalon_shutdown, }; bfgminer-bfgminer-3.10.0/driver-avalon.h000066400000000000000000000060611226556647300201200ustar00rootroot00000000000000/* * Copyright 2013 Avalon project * Copyright 2013 Con Kolivas * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #ifndef AVALON_H #define AVALON_H #ifdef USE_AVALON #include #define AVALON_TIME_FACTOR 10 #define AVALON_RESET_FAULT_DECISECONDS 1 #define AVALON_MINER_THREADS 1 #define AVALON_IO_SPEED 115200 #define AVALON_HASH_TIME_FACTOR ((float)1.67/0x32) #define AVALON_RESET_PITCH (300*1000*1000) #define AVALON_FAN_FACTOR 120 #define AVALON_DEFAULT_FAN_MAX_PWM 0xA0 /* 100% */ #define AVALON_DEFAULT_FAN_MIN_PWM 0x20 /* 20% */ #define AVALON_DEFAULT_TIMEOUT 0x32 #define AVALON_DEFAULT_FREQUENCY 256 #define AVALON_DEFAULT_MINER_NUM 0x20 #define AVALON_DEFAULT_ASIC_NUM 0xA #define AVALON_FTDI_READSIZE 512 struct avalon_task { uint8_t reset :1; uint8_t flush_fifo :1; uint8_t fan_eft :1; uint8_t timer_eft :1; uint8_t asic_num :4; uint8_t fan_pwm_data; uint8_t timeout_data; uint8_t miner_num; uint8_t nonce_elf :1; uint8_t gate_miner_elf :1; uint8_t asic_pll :1; uint8_t gate_miner :1; uint8_t _pad0 :4; uint8_t _pad1[3]; uint32_t _pad2; uint8_t midstate[32]; uint8_t data[12]; } __attribute__((packed, aligned(4))); struct avalon_result { uint32_t nonce; uint8_t data[12]; uint8_t midstate[32]; uint8_t fan0; uint8_t fan1; uint8_t fan2; uint8_t temp0; uint8_t temp1; uint8_t temp2; uint8_t _pad0[2]; uint16_t fifo_wp; uint16_t fifo_rp; uint8_t chip_num; uint8_t pwm_data; uint8_t timeout; uint8_t miner_num; } __attribute__((packed, aligned(4))); struct avalon_info { int read_count; int baud; int miner_count; int asic_count; int timeout; int fan0; int fan1; int fan2; int temp0; int temp1; int temp2; int temp_max; int temp_history_count; int temp_history_index; int temp_sum; int temp_old; int fan_pwm; int no_matching_work; int matching_work[AVALON_DEFAULT_MINER_NUM]; int frequency; }; #define AVALON_WRITE_SIZE (sizeof(struct avalon_task)) #define AVALON_READ_SIZE (sizeof(struct avalon_result)) #define AVALON_ARRAY_SIZE 4 #define AVA_GETS_ERROR -1 #define AVA_GETS_OK 0 #define AVA_GETS_RESTART 1 #define AVA_GETS_TIMEOUT 2 #define AVA_SEND_ERROR -1 #define AVA_SEND_OK 0 #define AVA_SEND_BUFFER_EMPTY 1 #define AVA_SEND_BUFFER_FULL 2 #define AVA_BUFFER_FULL 0 #define AVA_BUFFER_EMPTY 1 #define avalon_open2(devpath, baud, purge) serial_open(devpath, baud, AVALON_RESET_FAULT_DECISECONDS, purge) #define avalon_open(devpath, baud) avalon_open2(devpath, baud, true) #define avalon_close(fd) close(fd) #define avalon_buffer_full(fd) get_serial_cts(fd) #define AVALON_READ_TIME(baud) ((double)AVALON_READ_SIZE * (double)8.0 / (double)(baud)) #define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1] ASSERT1(sizeof(uint32_t) == 4); extern struct avalon_info **avalon_info; #endif /* USE_AVALON */ #endif /* AVALON_H */ bfgminer-bfgminer-3.10.0/driver-bfsb.c000066400000000000000000000114141226556647300175450ustar00rootroot00000000000000/* * Copyright 2013 bitfury * Copyright 2013 Anatoly Legkodymov * Copyright 2013 Luke Dashjr * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config.h" #include #include "deviceapi.h" #include "libbitfury.h" #include "spidevc.h" #include "driver-bitfury.h" BFG_REGISTER_DRIVER(bfsb_drv) static bool bfsb_spi_txrx(struct spi_port *port) { struct cgpu_info * const proc = port->cgpu; struct bitfury_device * const bitfury = proc->device_data; spi_bfsb_select_bank(bitfury->slot); const bool rv = sys_spi_txrx(port); return rv; } static int bfsb_autodetect() { RUNONCE(0); struct cgpu_info *cgpu = NULL, *proc1 = NULL, *prev_cgpu = NULL; int proc_count = 0; applog(LOG_INFO, "INFO: bitfury_detect"); spi_init(); if (!sys_spi) return 0; struct bitfury_device **devicelist, *bitfury; struct spi_port *port; int i, j; struct bitfury_device dummy_bitfury; struct cgpu_info dummy_cgpu; dummy_cgpu.device_data = &dummy_bitfury; for (i = 0; i < 4; i++) { int chip_n; port = malloc(sizeof(*port)); *port = *sys_spi; port->cgpu = &dummy_cgpu; port->txrx = bfsb_spi_txrx; port->speed = 625000; dummy_bitfury.slot = i; chip_n = libbitfury_detectChips1(port); if (chip_n) { applog(LOG_WARNING, "BITFURY slot %d: %d chips detected", i, chip_n); devicelist = malloc(sizeof(*devicelist) * chip_n); for (j = 0; j < chip_n; ++j) { devicelist[j] = bitfury = malloc(sizeof(*bitfury)); *bitfury = (struct bitfury_device){ .spi = port, .slot = i, .fasync = j, }; } cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &bfsb_drv, .procs = chip_n, .device_data = devicelist, }; add_cgpu_slave(cgpu, prev_cgpu); proc_count += chip_n; if (!proc1) proc1 = cgpu; prev_cgpu = cgpu; } else free(port); } if (proc1) proc1->threads = 1; return proc_count; } static void bfsb_detect(void) { noserial_detect_manual(&bfsb_drv, bfsb_autodetect); } static bool bfsb_init(struct thr_info *thr) { struct bitfury_device **devicelist; struct cgpu_info *proc; struct bitfury_device *bitfury; for (proc = thr->cgpu; proc; proc = proc->next_proc) { devicelist = proc->device_data; bitfury = devicelist[proc->proc_id]; proc->device_data = bitfury; bitfury->spi->cgpu = proc; bitfury_init_chip(proc); bitfury->osc6_bits = 53; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_freq_stat(&bitfury->chip_stat, 52, 56); if (proc->proc_id == proc->procs - 1) free(devicelist); } timer_set_now(&thr->tv_poll); return true; } static void bfsb_shutdown(struct thr_info *thr) { bitfury_shutdown(thr); spi_bfsb_select_bank(-1); } static struct api_data *bfsb_api_device_detail(struct cgpu_info *cgpu) { struct bitfury_device * const bitfury = cgpu->device_data; struct api_data *root = bitfury_api_device_detail(cgpu); root = api_add_uint(root, "Slot", &(bitfury->slot), false); return root; } struct device_drv bfsb_drv = { .dname = "bfsb", .name = "BSB", .drv_detect = bfsb_detect, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .thread_init = bfsb_init, .poll = bitfury_do_io, .job_start = bitfury_noop_job_start, .job_process_results = bitfury_job_process_results, .get_api_extra_device_detail = bfsb_api_device_detail, .get_api_extra_device_status = bitfury_api_device_status, 
.set_device = bitfury_set_device, .thread_disable = bitfury_disable, .thread_enable = bitfury_enable, .thread_shutdown = bfsb_shutdown, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-bifury.c000066400000000000000000000347301226556647300201370ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include #include "deviceapi.h" #include "logging.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "miner.h" #include "util.h" #define BIFURY_MAX_QUEUED 0x10 BFG_REGISTER_DRIVER(bifury_drv) const char bifury_init_cmds[] = "flush\ntarget ffffffff\nmaxroll 0\n"; static ssize_t bifury_write(const struct cgpu_info * const dev, const void * const buf, const size_t count) { const int fd = dev->device_fd; if (opt_dev_protocol) { const int psz = (((const char*)buf)[count-1] == '\n') ? (count - 1) : count; applog(LOG_DEBUG, "%s: DEVPROTO: SEND %.*s", dev->dev_repr, psz, (const char*)buf); } return write(fd, buf, count); } static void *bifury_readln(int fd, bytes_t *leftover) { uint8_t buf[0x40]; ssize_t r; parse: if ( (r = bytes_find(leftover, '\n')) >= 0) { uint8_t *ret = malloc(r+1); if (r) memcpy(ret, bytes_buf(leftover), r); ret[r] = '\0'; bytes_shift(leftover, r + 1); return ret; } if ( (r = read(fd, buf, sizeof(buf))) > 0) { bytes_append(leftover, buf, r); goto parse; } return NULL; } struct bifury_state { bytes_t buf; uint32_t last_work_id; int needwork; bool has_needwork; uint8_t *osc6_bits; bool send_clock; }; static bool bifury_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "bi\xe2\x80\xa2""fury"); } static bool bifury_detect_one(const char * const devpath) { char buf[0x40], *p, *q, *s; bytes_t reply = BYTES_INIT; int major, minor, hwrev, chips; struct cgpu_info *cgpu; struct timeval tv_timeout; const int fd = serial_open(devpath, 0, 10, true); applog(LOG_DEBUG, "%s: %s %s", bifury_drv.dname, ((fd == -1) ? 
"Failed to open" : "Successfully opened"), devpath); if (unlikely(fd == -1)) return false; while (read(fd, buf, sizeof(buf)) == sizeof(buf)) {} if (opt_dev_protocol) applog(LOG_DEBUG, "%s fd=%d: DEVPROTO: SEND %s", bifury_drv.dname, fd, "version"); if (8 != write(fd, "version\n", 8)) { applog(LOG_DEBUG, "%s: Error sending version request", bifury_drv.dname); goto err; } timer_set_delay_from_now(&tv_timeout, 1000000); while (true) { p = bifury_readln(fd, &reply); if (p) { if (opt_dev_protocol) applog(LOG_DEBUG, "%s fd=%d: DEVPROTO: RECV %s", bifury_drv.dname, fd, p); if (!strncmp("version ", p, 8)) break; free(p); } if (timer_passed(&tv_timeout, NULL)) { applog(LOG_DEBUG, "%s: Timed out waiting for response to version request", bifury_drv.dname); goto err; } } bytes_free(&reply); serial_close(fd); s = p; major = strtol(&p[8], &p, 10); if (p == &buf[8] || p[0] != '.') goto parseerr; minor = strtol(&p[1], &q, 10); if (p == q || strncmp(" rev ", q, 5)) goto parseerr; hwrev = strtol(&q[5], &p, 10); if (p == q || strncmp(" chips ", p, 7)) goto parseerr; chips = strtol(&p[7], &q, 10); if (p == q || chips < 1) goto parseerr; free(s); applog(LOG_DEBUG, "%s: Found firmware %d.%d on hardware rev %d with %d chips", bifury_drv.dname, major, minor, hwrev, chips); cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &bifury_drv, .device_path = strdup(devpath), .deven = DEV_ENABLED, .procs = chips, .threads = 1, .cutofftemp = 75, }; // NOTE: Xcode's clang has a bug where it cannot find fields inside anonymous unions (more details in fpgautils) cgpu->device_fd = -1; return add_cgpu(cgpu); parseerr: applog(LOG_DEBUG, "%s: Error parsing version response", bifury_drv.dname); free(s); return false; err: bytes_free(&reply); serial_close(fd); return false; } static bool bifury_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, bifury_detect_one); } static bool bifury_set_queue_full(const struct cgpu_info * const dev, int needwork) { struct bifury_state * const state = dev->device_data; struct thr_info * const master_thr = dev->thr[0]; const int fd = dev->device_fd; if (needwork != -1) state->needwork = needwork; const bool full = (fd == -1 || !state->needwork); if (full == master_thr->queue_full) return full; for (const struct cgpu_info *proc = dev; proc; proc = proc->next_proc) { struct thr_info * const thr = proc->thr[0]; thr->queue_full = full; } return full; } void bifury_send_clock(const struct cgpu_info * const dev) { struct bifury_state * const state = dev->device_data; const struct cgpu_info *proc; size_t clockbufsz = 5 + (3 * dev->procs) + 1 + 1; char clockbuf[clockbufsz]; strcpy(clockbuf, "clock"); proc = dev; for (int i = 0; i < dev->procs; ++i, (proc = proc->next_proc)) { const struct thr_info * const thr = proc->thr[0]; int clk; if (proc->deven == DEV_ENABLED && !thr->pause) clk = state->osc6_bits[i]; else clk = 0; tailsprintf(clockbuf, clockbufsz, " %d", clk); } tailsprintf(clockbuf, clockbufsz, "\n"); --clockbufsz; if (clockbufsz != bifury_write(dev, clockbuf, clockbufsz)) { state->send_clock = true; applog(LOG_ERR, "%s: Failed to send clock assignments", dev->dev_repr); } else state->send_clock = false; } static bool bifury_thread_init(struct thr_info *master_thr) { struct cgpu_info * const dev = master_thr->cgpu, *proc; struct bifury_state * const state = malloc(sizeof(*state)); if (!state) return false; *state = (struct bifury_state){ .buf = BYTES_INIT, .osc6_bits = malloc(sizeof(*state->osc6_bits) * dev->procs), }; for (int i = 0; i < 
dev->procs; ++i) state->osc6_bits[i] = 54; for (proc = dev; proc; proc = proc->next_proc) { proc->device_data = state; proc->status = LIFE_INIT2; } bifury_set_queue_full(dev, 0); timer_set_now(&master_thr->tv_poll); return true; } static void bifury_reinit(struct cgpu_info * const proc) { timer_set_now(&proc->thr[0]->tv_poll); } void bifury_trigger_send_clock(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; struct bifury_state * const state = proc->device_data; state->send_clock = true; } static void bifury_common_error(struct cgpu_info * const dev, const enum dev_reason reason) { for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc) { struct thr_info * const thr = proc->thr[0]; dev_error(proc, reason); inc_hw_errors_only(thr); } } static bool bifury_queue_append(struct thr_info * const thr, struct work *work) { const struct cgpu_info * const dev = thr->cgpu->device; struct bifury_state * const state = dev->device_data; if (bifury_set_queue_full(dev, -1)) return false; struct thr_info * const master_thr = dev->thr[0]; char buf[5 + 0x98 + 1 + 8 + 1]; memcpy(buf, "work ", 5); bin2hex(&buf[5], work->data, 0x4c); work->device_id = ++state->last_work_id; sprintf(&buf[5 + 0x98], " %08x", work->device_id); buf[5 + 0x98 + 1 + 8] = '\n'; if (sizeof(buf) != bifury_write(dev, buf, sizeof(buf))) { applog(LOG_ERR, "%s: Failed to send work", dev->dev_repr); return false; } HASH_ADD(hh, master_thr->work_list, device_id, sizeof(work->device_id), work); int prunequeue = HASH_COUNT(master_thr->work_list) - BIFURY_MAX_QUEUED; if (prunequeue > 0) { struct work *tmp; applog(LOG_DEBUG, "%s: Pruning %d old work item%s", dev->dev_repr, prunequeue, prunequeue == 1 ? "" : "s"); HASH_ITER(hh, master_thr->work_list, work, tmp) { HASH_DEL(master_thr->work_list, work); free_work(work); if (--prunequeue < 1) break; } } bifury_set_queue_full(dev, state->needwork - 1); return true; } static void bifury_queue_flush(struct thr_info * const thr) { const struct cgpu_info *dev = thr->cgpu; if (dev != dev->device) return; const int fd = dev->device_fd; if (fd != -1) bifury_write(dev, "flush\n", 6); bifury_set_queue_full(dev, dev->procs); } static const struct cgpu_info *device_proc_by_id(const struct cgpu_info * const dev, int procid) { const struct cgpu_info *proc = dev; for (int i = 0; i < procid; ++i) { proc = proc->next_proc; if (unlikely(!proc)) return NULL; } return proc; } static void bifury_handle_cmd(struct cgpu_info * const dev, const char * const cmd) { struct thr_info * const master_thr = dev->thr[0]; struct bifury_state * const state = dev->device_data; struct thr_info *thr; struct work *work; char *p; if (!strncmp(cmd, "submit ", 7)) { // submit uint32_t nonce = strtoll(&cmd[7], &p, 0x10); const uint32_t jobid = strtoll(&p[1], &p, 0x10); const uint32_t ntime = strtoll(&p[1], &p, 0x10); const int chip = atoi(&p[1]); nonce = le32toh(nonce); const struct cgpu_info *proc = device_proc_by_id(dev, chip); if (unlikely(!proc)) proc = dev; thr = proc->thr[0]; HASH_FIND(hh, master_thr->work_list, &jobid, sizeof(jobid), work); if (work) { const uint32_t work_ntime = be32toh(*(uint32_t*)&work->data[68]); submit_noffset_nonce(thr, work, nonce, ntime - work_ntime); } else if (!jobid) applog(LOG_DEBUG, "%s: Dummy submit ignored", dev->dev_repr); else inc_hw_errors2(thr, NULL, &nonce); if (!state->has_needwork) bifury_set_queue_full(dev, state->needwork + 2); } else if (!strncmp(cmd, "temp ", 5)) { struct cgpu_info *proc; const int decicelsius = atoi(&cmd[5]); if (decicelsius) { const float 
celsius = 0.1 * (float)decicelsius; for (proc = dev; proc; proc = proc->next_proc) proc->temp = celsius; } } else if (!strncmp(cmd, "job ", 4)) { // job const uint32_t jobid = strtoll(&cmd[4], &p, 0x10); strtoll(&p[1], &p, 0x10); const int chip = atoi(&p[1]); const struct cgpu_info * const proc = device_proc_by_id(dev, chip); HASH_FIND(hh, master_thr->work_list, &jobid, sizeof(jobid), work); if (likely(work)) { if (likely(proc)) { thr = proc->thr[0]; hashes_done2(thr, 0xbd000000, NULL); } else applog(LOG_DEBUG, "%s: Unknown chip id: %s", dev->dev_repr, cmd); HASH_DEL(master_thr->work_list, work); free_work(work); } else applog(LOG_WARNING, "%s: Unknown job id: %s", dev->dev_repr, cmd); } else if (!strncmp(cmd, "hwerror ", 8)) { const int chip = atoi(&cmd[8]); const struct cgpu_info * const proc = device_proc_by_id(dev, chip); if (unlikely(!proc)) applogr(, LOG_DEBUG, "%s: Unknown chip id: %s", dev->dev_repr, cmd); thr = proc->thr[0]; inc_hw_errors2(thr, NULL, UNKNOWN_NONCE); } else if (!strncmp(cmd, "needwork ", 9)) { const int needwork = atoi(&cmd[9]); state->has_needwork = true; bifury_set_queue_full(dev, needwork); applog(LOG_DEBUG, "%s: needwork=%d", dev->dev_repr, state->needwork); } } static void bifury_poll(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu; struct bifury_state * const state = dev->device_data; int fd = dev->device_fd; char *cmd; if (unlikely(fd == -1)) { fd = serial_open(dev->device_path, 0, 1, true); if (unlikely(fd == -1)) { applog(LOG_ERR, "%s: Failed to open %s", dev->dev_repr, dev->device_path); bifury_common_error(dev, REASON_THREAD_FAIL_INIT); return; } dev->device_fd = fd; if (sizeof(bifury_init_cmds)-1 != bifury_write(dev, bifury_init_cmds, sizeof(bifury_init_cmds)-1)) { applog(LOG_ERR, "%s: Failed to send configuration", dev->dev_repr); bifury_common_error(dev, REASON_THREAD_FAIL_INIT); serial_close(fd); dev->device_fd = -1; return; } bifury_set_queue_full(dev, dev->procs * 2); state->send_clock = true; } if (state->send_clock) bifury_send_clock(dev); while ( (cmd = bifury_readln(fd, &state->buf)) ) { if (opt_dev_protocol) applog(LOG_DEBUG, "%s: DEVPROTO: RECV %s", dev->dev_repr, cmd); bifury_handle_cmd(dev, cmd); free(cmd); } } static struct api_data *bifury_api_device_status(struct cgpu_info * const proc) { struct bifury_state * const state = proc->device_data; struct api_data *root = NULL; int osc6_bits = state->osc6_bits[proc->proc_id]; root = api_add_int(root, "Clock Bits", &osc6_bits, true); return root; } char *bifury_set_device(struct cgpu_info * const proc, char * const option, char * const setting, char * const replybuf) { struct bifury_state * const state = proc->device_data; if (!strcasecmp(option, "help")) { sprintf(replybuf, "osc6_bits: range 33-63 (slow to fast)"); return replybuf; } if (!strcasecmp(option, "osc6_bits")) { if (!setting || !*setting) { sprintf(replybuf, "missing setting"); return replybuf; } const uint8_t val = atoi(setting); if (val < 33 || val > 63) { sprintf(replybuf, "invalid setting"); return replybuf; } state->osc6_bits[proc->proc_id] = val; state->send_clock = true; return NULL; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } #ifdef HAVE_CURSES void bifury_tui_wlogprint_choices(struct cgpu_info * const proc) { wlogprint("[O]scillator bits "); } const char *bifury_tui_handle_choice(struct cgpu_info * const proc, const int input) { struct bifury_state * const state = proc->device_data; switch (input) { case 'o': case 'O': { const int val = curses_int("Set 
oscillator bits (range 33-63; slow to fast)"); if (val < 33 || val > 63) return "Invalid oscillator bits\n"; state->osc6_bits[proc->proc_id] = val; state->send_clock = true; return "Oscillator bits changing\n"; } } return NULL; } void bifury_wlogprint_status(struct cgpu_info * const proc) { const struct bifury_state * const state = proc->device_data; const int osc6_bits = state->osc6_bits[proc->proc_id]; wlogprint("Oscillator bits: %d\n", osc6_bits); } #endif struct device_drv bifury_drv = { .dname = "bifury", .name = "BIF", .lowl_match = bifury_lowl_match, .lowl_probe = bifury_lowl_probe, .thread_init = bifury_thread_init, .reinit_device = bifury_reinit, .thread_disable = bifury_trigger_send_clock, .thread_enable = bifury_trigger_send_clock, .minerloop = minerloop_queue, .queue_append = bifury_queue_append, .queue_flush = bifury_queue_flush, .poll = bifury_poll, .get_api_extra_device_status = bifury_api_device_status, .set_device = bifury_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = bifury_wlogprint_status, .proc_tui_wlogprint_choices = bifury_tui_wlogprint_choices, .proc_tui_handle_choice = bifury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-bigpic.c000066400000000000000000000201471226556647300200710ustar00rootroot00000000000000/* * Copyright 2013 Andreas Auer * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ /* * Big Picture Mining USB miner with Bitfury ASIC */ #include "config.h" #include #include #include "miner.h" #include "logging.h" #include "libbitfury.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "deviceapi.h" #include "sha2.h" #include "driver-bigpic.h" #include BFG_REGISTER_DRIVER(bigpic_drv) static bool bigpic_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "Bitfury", "BF1"); } //------------------------------------------------------------------------------ static bool bigpic_detect_custom(const char *devpath, struct device_drv *api, struct bigpic_info *info) { int fd = serial_open(devpath, info->baud, 1, true); if(fd < 0) { return false; } char buf[sizeof(struct bigpic_identity)+1]; int len; if (1 != write(fd, "I", 1)) { applog(LOG_ERR, "%s: Failed writing id request to %s", bigpic_drv.dname, devpath); serial_close(fd); return false; } len = serial_read(fd, buf, sizeof(buf)); if(len != 14) { serial_close(fd); return false; } info->id.version = buf[1]; memcpy(info->id.product, buf+2, 8); memcpy(&info->id.serial, buf+10, 4); info->id.serial = le32toh(info->id.serial); applog(LOG_DEBUG, "%s: %s: %d, %s %08x", bigpic_drv.dname, devpath, info->id.version, info->id.product, info->id.serial); char buf_state[sizeof(struct bigpic_state)+1]; len = 0; if (1 != write(fd, "R", 1)) { applog(LOG_ERR, "%s: Failed writing reset request to %s", bigpic_drv.dname, devpath); serial_close(fd); return false; } int limit = 50; while (len == 0 && --limit) { len = serial_read(fd, buf, sizeof(buf_state)); cgsleep_ms(100); } serial_close(fd); if(len != 7) { applog(LOG_ERR, "%s: %s not responding to reset: %d", bigpic_drv.dname, devpath, len); return false; } if (serial_claim_v(devpath, api)) return false; struct cgpu_info *bigpic; bigpic = calloc(1, sizeof(struct cgpu_info)); bigpic->drv = api; bigpic->device_path = strdup(devpath); bigpic->device_fd = -1; 
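/* The probe handle was closed above; the fd stays at -1 until bigpic_init()
 * reopens the port when the mining thread starts. */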
bigpic->threads = 1; add_cgpu(bigpic); applog(LOG_INFO, "Found %"PRIpreprv" at %s", bigpic->proc_repr, devpath); applog(LOG_DEBUG, "%"PRIpreprv": Init: baud=%d", bigpic->proc_repr, info->baud); bigpic->device_data = info; return true; } //------------------------------------------------------------------------------ static bool bigpic_detect_one(const char *devpath) { struct bigpic_info *info = calloc(1, sizeof(struct bigpic_info)); if (unlikely(!info)) quit(1, "Failed to malloc bigpicInfo"); info->baud = BPM_BAUD; if (!bigpic_detect_custom(devpath, &bigpic_drv, info)) { free(info); return false; } return true; } static bool bigpic_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, bigpic_detect_one); } //------------------------------------------------------------------------------ static bool bigpic_init(struct thr_info *thr) { struct cgpu_info *bigpic = thr->cgpu; struct bigpic_info *info = (struct bigpic_info *)bigpic->device_data; applog(LOG_DEBUG, "%"PRIpreprv": init", bigpic->proc_repr); int fd = serial_open(bigpic->device_path, info->baud, 1, true); if (unlikely(-1 == fd)) { applog(LOG_ERR, "%"PRIpreprv": Failed to open %s", bigpic->proc_repr, bigpic->device_path); return false; } bigpic->device_fd = fd; applog(LOG_INFO, "%"PRIpreprv": Opened %s", bigpic->proc_repr, bigpic->device_path); info->tx_buffer[0] = 'W'; return true; } //------------------------------------------------------------------------------ static bool duplicate(uint32_t *results, uint32_t size, uint32_t test_nonce) { for(uint32_t i=0; icgpu; struct bigpic_info *info = (struct bigpic_info *)board->device_data; uint32_t results[16*6]; uint32_t num_results; int hwe = 0; uint32_t m7 = *((uint32_t *)&work->data[64]); uint32_t ntime = *((uint32_t *)&work->data[68]); uint32_t nbits = *((uint32_t *)&work->data[72]); num_results = 0; for(int i=0; irx_len; i+=7) { struct bigpic_state state; state.state = info->rx_buffer[i + 1]; state.switched = info->rx_buffer[i + 2]; memcpy(&state.nonce, info->rx_buffer + i + 3, 4); if(duplicate(results, num_results, state.nonce)) continue; state.nonce = le32toh(state.nonce); uint32_t nonce = bitfury_decnonce(state.nonce); results[num_results++] = state.nonce; applog(LOG_DEBUG, "%"PRIpreprv": Len: %lu Cmd: %c State: %c Switched: %d Nonce: %08lx", board->proc_repr, (unsigned long)info->rx_len, info->rx_buffer[i], state.state, state.switched, (unsigned long)nonce); if (bitfury_fudge_nonce(work->midstate, m7, ntime, nbits, &nonce)) submit_nonce(thr, work, nonce); else if (info->rx_buffer[i + 3] != '\xe0' || hwe++) inc_hw_errors(thr, work, nonce); } } static bool bigpic_job_prepare(struct thr_info *thr, struct work *work, __maybe_unused uint64_t max_nonce) { struct cgpu_info *board = thr->cgpu; struct bigpic_info *info = (struct bigpic_info *)board->device_data; memcpy(&info->tx_buffer[ 1], work->midstate, 32); memcpy(&info->tx_buffer[33], &work->data[64], 12); work->blk.nonce = 0xffffffff; return true; } static void bigpic_job_start(struct thr_info *thr) { struct cgpu_info *board = thr->cgpu; struct bigpic_info *info = (struct bigpic_info *)board->device_data; if (opt_dev_protocol && opt_debug) { char hex[91]; bin2hex(hex, info->tx_buffer, 45); applog(LOG_DEBUG, "%"PRIpreprv": SEND: %s", board->proc_repr, hex); } if (45 != write(board->device_fd, info->tx_buffer, 45)) { applog(LOG_ERR, "%"PRIpreprv": Failed writing work task", board->proc_repr); dev_error(board, REASON_DEV_COMMS_ERROR); job_start_abort(thr, true); return; } while(1) { uint8_t 
buffer[7]; int len; len = serial_read(board->device_fd, buffer, 7); if(len > 0) break; } applog(LOG_DEBUG, "%"PRIpreprv": Work Task sent", board->proc_repr); while(1) { info->rx_len = serial_read(board->device_fd, info->rx_buffer, sizeof(info->rx_buffer)); if(info->rx_len > 0) break; } applog(LOG_DEBUG, "%"PRIpreprv": Work Task accepted", board->proc_repr); applog(LOG_DEBUG, "%"PRIpreprv": Nonces sent back: %d", board->proc_repr, info->rx_len / 7); mt_job_transition(thr); // TODO: Delay morework until right before it's needed timer_set_now(&thr->tv_morework); job_start_complete(thr); } static int64_t bigpic_job_process_results(struct thr_info *thr, struct work *work, bool stopping) { // FIXME: not sure how to handle stopping bigpic_process_results(thr, work); return 0xBD000000; } //------------------------------------------------------------------------------ static void bigpic_shutdown(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; serial_close(cgpu->device_fd); } //------------------------------------------------------------------------------ static bool bigpic_identify(struct cgpu_info *cgpu) { char buf[] = "L"; if (sizeof(buf) != write(cgpu->device_fd, buf, sizeof(buf))) return false; return true; } //------------------------------------------------------------------------------ struct device_drv bigpic_drv = { .dname = "bigpic", .name = "BPM", .probe_priority = -110, .lowl_match = bigpic_lowl_match, .lowl_probe = bigpic_lowl_probe, .identify_device = bigpic_identify, .thread_init = bigpic_init, .minerloop = minerloop_async, .job_prepare = bigpic_job_prepare, .job_start = bigpic_job_start, .job_process_results = bigpic_job_process_results, .thread_shutdown = bigpic_shutdown, }; bfgminer-bfgminer-3.10.0/driver-bigpic.h000066400000000000000000000007201226556647300200710ustar00rootroot00000000000000#ifndef BFG_DRIVER_BIGPIC_H #define BFG_DRIVER_BIGPIC_H #include #define BPM_BAUD 115200 struct bigpic_identity { uint8_t version; char product[8]; uint32_t serial; } __attribute__((packed)); struct bigpic_state { uint8_t state; uint8_t switched; uint32_t nonce; } __attribute__((packed)); struct bigpic_info { uint32_t baud; struct bigpic_identity id; char tx_buffer[45]; char rx_buffer[1024]; uint32_t rx_len; }; #endif bfgminer-bfgminer-3.10.0/driver-bitforce.c000066400000000000000000001633631226556647300204410ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Con Kolivas * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include "compat.h" #include "deviceapi.h" #include "miner.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "util.h" #define BITFORCE_SLEEP_MS 500 #define BITFORCE_TIMEOUT_S 7 #define BITFORCE_TIMEOUT_MS (BITFORCE_TIMEOUT_S * 1000) #define BITFORCE_LONG_TIMEOUT_S 25 #define BITFORCE_LONG_TIMEOUT_MS (BITFORCE_LONG_TIMEOUT_S * 1000) #define BITFORCE_CHECK_INTERVAL_MS 10 #define WORK_CHECK_INTERVAL_MS 50 #define MAX_START_DELAY_MS 100 #define tv_to_ms(tval) ((unsigned long)(tval.tv_sec * 1000 + tval.tv_usec / 1000)) #define TIME_AVG_CONSTANT 8 #define BITFORCE_QRESULT_LINE_LEN 165 #define BITFORCE_MAX_QUEUED_MAX 40 #define BITFORCE_MIN_QUEUED_MAX 10 #define BITFORCE_MAX_QRESULTS 16 #define BITFORCE_GOAL_QRESULTS 5 #define BITFORCE_MIN_QRESULT_WAIT BITFORCE_CHECK_INTERVAL_MS #define BITFORCE_MAX_QRESULT_WAIT 1000 #define BITFORCE_MAX_BQUEUE_AT_ONCE 5 enum bitforce_proto { BFP_WORK, BFP_RANGE, BFP_QUEUE, BFP_BQUEUE, BFP_PQUEUE, }; static const char *protonames[] = { "full work", "nonce range", "work queue", "bulk queue", "parallel queue", }; BFG_REGISTER_DRIVER(bitforce_drv) BFG_REGISTER_DRIVER(bitforce_queue_api) // Code must deal with a timeout #define BFopen(devpath) serial_open(devpath, 0, 250, true) static void BFgets(char *buf, size_t bufLen, int fd) { char *obuf = buf; do { buf[0] = '\0'; --bufLen; } while (likely(bufLen && read(fd, buf, 1) == 1 && (buf++)[0] != '\n')); buf[0] = '\0'; if (unlikely(opt_dev_protocol)) applog(LOG_DEBUG, "DEVPROTO: GETS (fd=%d): %s", fd, obuf); } static ssize_t BFwrite(int fd, const void *buf, ssize_t bufLen) { if ((bufLen) != write(fd, buf, bufLen)) return 0; else return bufLen; } static ssize_t bitforce_send(int fd, int procid, const void *buf, ssize_t bufLen) { if (!procid) return BFwrite(fd, buf, bufLen); if (bufLen > 255) return -1; size_t bufLeft = bufLen + 3; char realbuf[bufLeft], *bufp; ssize_t rv; memcpy(&realbuf[3], buf, bufLen); realbuf[0] = '@'; realbuf[1] = bufLen; realbuf[2] = procid; bufp = realbuf; do { rv = BFwrite(fd, bufp, bufLeft); if (rv <= 0) return rv; bufLeft -= rv; } while (bufLeft > 0); return bufLen; } static void bitforce_cmd1b(int fd, int procid, void *buf, size_t bufsz, const char *cmd, size_t cmdsz) { if (unlikely(opt_dev_protocol)) applog(LOG_DEBUG, "DEVPROTO: CMD1 (fd=%d xlink=%d): %s", fd, procid, cmd); bitforce_send(fd, procid, cmd, cmdsz); BFgets(buf, bufsz, fd); } #define bitforce_cmd1(fd, xlinkid, buf, bufsz, cmd) bitforce_cmd1b(fd, xlinkid, buf, bufsz, cmd, 3) static void bitforce_cmd2(int fd, int procid, void *buf, size_t bufsz, const char *cmd, void *data, size_t datasz) { bitforce_cmd1(fd, procid, buf, bufsz, cmd); if (strncasecmp(buf, "OK", 2)) return; if (unlikely(opt_dev_protocol)) { char hex[(datasz * 2) + 1]; bin2hex(hex, data, datasz); applog(LOG_DEBUG, "DEVPROTO: CMD2 (fd=%d xlink=%d): %s", fd, procid, hex); } bitforce_send(fd, procid, data, datasz); BFgets(buf, bufsz, fd); } #define BFclose(fd) serial_close(fd) struct bitforce_init_data { bool sc; long devmask; int *parallels; }; static int bitforce_chips_to_plan_for(int parallel, int chipcount) { if (parallel < 1) return parallel; if (chipcount > 15) return 32; if (chipcount > 7) return 16; if (chipcount > 3) return 8; if (chipcount > 1) return 4; if (chipcount ) return 2; return 1; } static bool bitforce_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "BitFORCE", "SHA256"); } static bool 
bitforce_detect_one(const char *devpath) { int fdDev = serial_open(devpath, 0, 10, true); struct cgpu_info *bitforce; char pdevbuf[0x100]; size_t pdevbuf_len; char *s; int procs = 1, parallel = -1; long maxchipno = 0; struct bitforce_init_data *initdata; char *manuf = NULL; applog(LOG_DEBUG, "BFL: Attempting to open %s", devpath); if (unlikely(fdDev == -1)) { applog(LOG_DEBUG, "BFL: Failed to open %s", devpath); return false; } bitforce_cmd1(fdDev, 0, pdevbuf, sizeof(pdevbuf), "ZGX"); if (unlikely(!pdevbuf[0])) { applog(LOG_DEBUG, "BFL: Error reading/timeout (ZGX)"); BFclose(fdDev); return 0; } if (unlikely(!strstr(pdevbuf, "SHA256"))) { applog(LOG_DEBUG, "BFL: Didn't recognise BitForce on %s", devpath); BFclose(fdDev); return false; } if (serial_claim_v(devpath, &bitforce_drv)) { BFclose(fdDev); return false; } applog(LOG_DEBUG, "Found BitForce device on %s", devpath); initdata = malloc(sizeof(*initdata)); *initdata = (struct bitforce_init_data){ .sc = false, }; bitforce_cmd1(fdDev, 0, pdevbuf, sizeof(pdevbuf), "ZCX"); for (int i = 0; (!pdevbuf[0]) && i < 4; ++i) BFgets(pdevbuf, sizeof(pdevbuf), fdDev); for ( ; strncasecmp(pdevbuf, "OK", 2); BFgets(pdevbuf, sizeof(pdevbuf), fdDev) ) { pdevbuf_len = strlen(pdevbuf); if (unlikely(!pdevbuf_len)) continue; pdevbuf[pdevbuf_len-1] = '\0'; // trim newline applog(LOG_DEBUG, " %s", pdevbuf); if (!strncasecmp(pdevbuf, "PROCESSOR ", 10)) maxchipno = max(maxchipno, atoi(&pdevbuf[10])); else if (!strncasecmp(pdevbuf, "DEVICES IN CHAIN:", 17)) procs = atoi(&pdevbuf[17]); else if (!strncasecmp(pdevbuf, "CHAIN PRESENCE MASK:", 20)) initdata->devmask = strtol(&pdevbuf[20], NULL, 16); else if (!strncasecmp(pdevbuf, "DEVICE:", 7) && strstr(pdevbuf, "SC")) initdata->sc = true; else if (!strncasecmp(pdevbuf, "CHIP PARALLELIZATION: YES @", 27)) parallel = atoi(&pdevbuf[27]); else if (!strncasecmp(pdevbuf, "MANUFACTURER:", 13)) { manuf = &pdevbuf[13]; while (manuf[0] && isspace(manuf[0])) ++manuf; if (manuf[0]) manuf = strdup(manuf); else manuf = NULL; } } parallel = bitforce_chips_to_plan_for(parallel, maxchipno); initdata->parallels = malloc(sizeof(initdata->parallels[0]) * procs); initdata->parallels[0] = parallel; parallel = abs(parallel); for (int proc = 1; proc < procs; ++proc) { applog(LOG_DEBUG, "Slave board %d:", proc); initdata->parallels[proc] = -1; maxchipno = 0; bitforce_cmd1(fdDev, proc, pdevbuf, sizeof(pdevbuf), "ZCX"); for (int i = 0; (!pdevbuf[0]) && i < 4; ++i) BFgets(pdevbuf, sizeof(pdevbuf), fdDev); for ( ; strncasecmp(pdevbuf, "OK", 2); BFgets(pdevbuf, sizeof(pdevbuf), fdDev) ) { pdevbuf_len = strlen(pdevbuf); if (unlikely(!pdevbuf_len)) continue; pdevbuf[pdevbuf_len-1] = '\0'; // trim newline applog(LOG_DEBUG, " %s", pdevbuf); if (!strncasecmp(pdevbuf, "PROCESSOR ", 10)) maxchipno = max(maxchipno, atoi(&pdevbuf[10])); else if (!strncasecmp(pdevbuf, "CHIP PARALLELIZATION: YES @", 27)) initdata->parallels[proc] = atoi(&pdevbuf[27]); } initdata->parallels[proc] = bitforce_chips_to_plan_for(initdata->parallels[proc], maxchipno); parallel += abs(initdata->parallels[proc]); } BFclose(fdDev); if (unlikely((procs != 1 || parallel != 1) && !initdata->sc)) { // Only bitforce_queue supports parallelization and XLINK, so force SC mode and hope for the best applog(LOG_WARNING, "SC features detected with non-SC device; this is not supported!"); initdata->sc = true; } // We have a real BitForce! 
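/* Register the device: SC units are handled by the queue-based driver, and
 * every parallel chip detected above is exposed as a separate processor. */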
bitforce = calloc(1, sizeof(*bitforce)); bitforce->drv = &bitforce_drv; if (initdata->sc) bitforce->drv = &bitforce_queue_api; bitforce->device_path = strdup(devpath); if (manuf) bitforce->dev_manufacturer = manuf; bitforce->deven = DEV_ENABLED; bitforce->procs = parallel; bitforce->threads = 1; if (initdata->sc) bitforce->cutofftemp = 85; if (likely((!memcmp(pdevbuf, ">>>ID: ", 7)) && (s = strstr(pdevbuf + 3, ">>>")))) { s[0] = '\0'; bitforce->name = strdup(pdevbuf + 7); } bitforce->device_data = initdata; mutex_init(&bitforce->device_mutex); return add_cgpu(bitforce); } static bool bitforce_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, bitforce_detect_one); } struct bitforce_data { int xlink_id; unsigned char next_work_ob[70]; // Data aligned for 32-bit access unsigned char *next_work_obs; // Start of data to send unsigned char next_work_obsz; const char *next_work_cmd; char noncebuf[14 + ((BITFORCE_MAX_QRESULTS+1) * BITFORCE_QRESULT_LINE_LEN)]; int poll_func; enum bitforce_proto proto; bool sc; int queued; int queued_max; int parallel; bool parallel_protocol; bool missing_zwx; bool already_have_results; bool just_flushed; int ready_to_queue; bool want_to_send_queue; unsigned result_busy_polled; unsigned sleep_ms_default; struct timeval tv_hashmeter_start; float temp[2]; long *volts; int volts_count; bool probed; bool supports_fanspeed; }; struct bitforce_proc_data { struct cgpu_info *cgpu; bool handles_board; // The first processor handles the queue for the entire board }; static void bitforce_clear_buffer(struct cgpu_info *); static void bitforce_comm_error(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; int *p_fdDev = &bitforce->device->device_fd; data->noncebuf[0] = '\0'; applog(LOG_ERR, "%"PRIpreprv": Comms error", bitforce->proc_repr); dev_error(bitforce, REASON_DEV_COMMS_ERROR); inc_hw_errors_only(thr); BFclose(*p_fdDev); int fd = *p_fdDev = BFopen(bitforce->device_path); if (fd == -1) { applog(LOG_ERR, "%s: Error reopening %s", bitforce->dev_repr, bitforce->device_path); return; } /* empty read buffer */ bitforce_clear_buffer(bitforce); } static bool bitforce_thread_prepare(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; int fdDev = BFopen(bitforce->device_path); if (unlikely(fdDev == -1)) { applog(LOG_ERR, "%s: Failed to open %s", bitforce->dev_repr, bitforce->device_path); return false; } bitforce->device_fd = fdDev; applog(LOG_INFO, "%s: Opened %s", bitforce->dev_repr, bitforce->device_path); return true; } static void __bitforce_clear_buffer(int fdDev) { char pdevbuf[0x100]; int count = 0; do { pdevbuf[0] = '\0'; BFgets(pdevbuf, sizeof(pdevbuf), fdDev); } while (pdevbuf[0] && (++count < 10)); } static void bitforce_clear_buffer(struct cgpu_info *bitforce) { pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fdDev; mutex_lock(mutexp); fdDev = bitforce->device->device_fd; if (fdDev) { applog(LOG_DEBUG, "%"PRIpreprv": Clearing read buffer", bitforce->proc_repr); __bitforce_clear_buffer(fdDev); } mutex_unlock(mutexp); } void work_list_del(struct work **head, struct work *); void bitforce_reinit(struct cgpu_info *bitforce) { struct bitforce_data *data = bitforce->device_data; struct thr_info *thr = bitforce->thr[0]; struct bitforce_proc_data *procdata = thr->cgpu_data; const char *devpath = bitforce->device_path; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int *p_fdDev = &bitforce->device->device_fd; int fdDev, retries = 
0; char pdevbuf[0x100]; char *s; if (!procdata->handles_board) return; mutex_lock(mutexp); fdDev = *p_fdDev; applog(LOG_WARNING, "%"PRIpreprv": Re-initialising", bitforce->proc_repr); if (fdDev) { BFclose(fdDev); cgsleep_ms(5000); *p_fdDev = 0; } fdDev = BFopen(devpath); if (unlikely(fdDev == -1)) { mutex_unlock(mutexp); applog(LOG_ERR, "%s: Failed to open %s", bitforce->dev_repr, devpath); return; } __bitforce_clear_buffer(fdDev); do { bitforce_cmd1(fdDev, 0, pdevbuf, sizeof(pdevbuf), "ZGX"); if (unlikely(!pdevbuf[0])) { mutex_unlock(mutexp); BFclose(fdDev); applog(LOG_ERR, "%s: Error reading/timeout (ZGX)", bitforce->dev_repr); return; } if (retries++) cgsleep_ms(10); } while (strstr(pdevbuf, "BUSY") && (retries * 10 < BITFORCE_TIMEOUT_MS)); if (unlikely(!strstr(pdevbuf, "SHA256"))) { mutex_unlock(mutexp); BFclose(fdDev); applog(LOG_ERR, "%s: Didn't recognise BitForce on %s returned: %s", bitforce->dev_repr, devpath, pdevbuf); return; } if (likely((!memcmp(pdevbuf, ">>>ID: ", 7)) && (s = strstr(pdevbuf + 3, ">>>")))) { s[0] = '\0'; free((void*)bitforce->name); bitforce->name = strdup(pdevbuf + 7); } *p_fdDev = fdDev; bitforce->sleep_ms = data->sleep_ms_default; if (bitforce->drv == &bitforce_queue_api) { struct work *work, *tmp; timer_set_delay_from_now(&thr->tv_poll, 0); notifier_wake(thr->notifier); bitforce_cmd1(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), "ZQX"); DL_FOREACH_SAFE(thr->work_list, work, tmp) work_list_del(&thr->work_list, work); data->queued = 0; data->ready_to_queue = 0; data->already_have_results = false; data->just_flushed = true; thr->queue_full = false; } mutex_unlock(mutexp); } static void bitforce_flash_led(struct cgpu_info *bitforce) { struct bitforce_data *data = bitforce->device_data; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fdDev = bitforce->device->device_fd; if (!fdDev) return; /* Do not try to flash the led if we're polling for a result to * minimise the chance of interleaved results */ if (bitforce->polling) return; /* It is not critical flashing the led so don't get stuck if we * can't grab the mutex here */ if (mutex_trylock(mutexp)) return; char pdevbuf[0x100]; bitforce_cmd1(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), "ZMX"); /* Once we've tried - don't do it until told to again */ bitforce->flash_led = false; /* However, this stops anything else getting a reply * So best to delay any other access to the BFL */ cgsleep_ms(4000); mutex_unlock(mutexp); return; // nothing is returned by the BFL } static float my_strtof(const char *nptr, char **endptr) { float f = strtof(nptr, endptr); /* Cope with older software that breaks and reads nonsense * values */ if (f > 100) f = strtod(nptr, endptr); return f; } static void set_float_if_gt_zero(float *var, float value) { if (value > 0) *var = value; } static bool bitforce_get_temp(struct cgpu_info *bitforce) { struct bitforce_data *data = bitforce->device_data; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fdDev = bitforce->device->device_fd; char pdevbuf[0x40]; char voltbuf[0x40]; char *s; struct cgpu_info *chip_cgpu; if (!fdDev) return false; /* Do not try to get the temperature if we're polling for a result to * minimise the chance of interleaved results */ if (bitforce->polling) return true; // Flash instead of Temp - doing both can be too slow if (bitforce->flash_led) { bitforce_flash_led(bitforce); return true; } /* It is not critical getting temperature so don't get stuck if we * can't grab the mutex here */ if (mutex_trylock(mutexp)) return false; if 
(data->sc) { if (unlikely(!data->probed)) { bitforce_cmd1(fdDev, data->xlink_id, voltbuf, sizeof(voltbuf), "Z9X"); if (strncasecmp(voltbuf, "ERR", 3)) data->supports_fanspeed = true; data->probed = true; } bitforce_cmd1(fdDev, data->xlink_id, voltbuf, sizeof(voltbuf), "ZTX"); } bitforce_cmd1(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), "ZLX"); mutex_unlock(mutexp); if (data->sc && likely(voltbuf[0])) { // Process voltage info // "NNNxxx,NNNxxx,NNNxxx" int n = 1; for (char *p = voltbuf; p[0]; ++p) if (p[0] == ',') ++n; long *out = malloc(sizeof(long) * n); if (!out) goto skipvolts; n = 0; char *saveptr, *v; for (v = strtok_r(voltbuf, ",", &saveptr); v; v = strtok_r(NULL, ",", &saveptr)) out[n++] = strtol(v, NULL, 10); data->volts_count = 0; free(data->volts); data->volts = out; data->volts_count = n; } skipvolts: if (unlikely(!pdevbuf[0])) { struct thr_info *thr = bitforce->thr[0]; applog(LOG_ERR, "%"PRIpreprv": Error: Get temp returned empty string/timed out", bitforce->proc_repr); inc_hw_errors_only(thr); return false; } if ((!strncasecmp(pdevbuf, "TEMP", 4)) && (s = strchr(pdevbuf + 4, ':'))) { float temp = my_strtof(s + 1, &s); set_float_if_gt_zero(&data->temp[0], temp); for ( ; s[0]; ++s) { if (!strncasecmp(s, "TEMP", 4) && (s = strchr(&s[4], ':'))) { float temp2 = my_strtof(s + 1, &s); set_float_if_gt_zero(&data->temp[1], temp2); if (temp2 > temp) temp = temp2; } } if (temp > 0) { chip_cgpu = bitforce; for (int i = 0; i < data->parallel; ++i, (chip_cgpu = chip_cgpu->next_proc)) chip_cgpu->temp = temp; } } else { struct thr_info *thr = bitforce->thr[0]; /* Use the temperature monitor as a kind of watchdog for when * our responses are out of sync and flush the buffer to * hopefully recover */ applog(LOG_WARNING, "%"PRIpreprv": Garbled response probably throttling, clearing buffer", bitforce->proc_repr); dev_error(bitforce, REASON_DEV_THROTTLE); /* Count throttling episodes as hardware errors */ inc_hw_errors_only(thr); bitforce_clear_buffer(bitforce); return false; } return true; } static inline void dbg_block_data(struct cgpu_info *bitforce) { if (!opt_debug) return; struct bitforce_data *data = bitforce->device_data; char s[89]; bin2hex(s, &data->next_work_ob[8], 44); applog(LOG_DEBUG, "%"PRIpreprv": block data: %s", bitforce->proc_repr, s); } static void bitforce_change_mode(struct cgpu_info *, enum bitforce_proto); static bool bitforce_job_prepare(struct thr_info *thr, struct work *work, __maybe_unused uint64_t max_nonce) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; int fdDev = bitforce->device->device_fd; unsigned char *ob_ms = &data->next_work_ob[8]; unsigned char *ob_dt = &ob_ms[32]; // If polling job_start, cancel it if (data->poll_func == 1) { thr->tv_poll.tv_sec = -1; data->poll_func = 0; } memcpy(ob_ms, work->midstate, 32); memcpy(ob_dt, work->data + 64, 12); switch (data->proto) { case BFP_BQUEUE: quithere(1, "%"PRIpreprv": Impossible BFP_BQUEUE", bitforce->proc_repr); case BFP_PQUEUE: quithere(1, "%"PRIpreprv": Impossible BFP_PQUEUE", bitforce->proc_repr); case BFP_RANGE: { uint32_t *ob_nonce = (uint32_t*)&(ob_dt[32]); ob_nonce[0] = htobe32(work->blk.nonce); ob_nonce[1] = htobe32(work->blk.nonce + bitforce->nonces); // FIXME: if nonce range fails... 
we didn't increment enough work->blk.nonce += bitforce->nonces + 1; break; } case BFP_QUEUE: if (thr->work) { pthread_mutex_t *mutexp = &bitforce->device->device_mutex; char pdevbuf[0x100]; if (unlikely(!fdDev)) return false; mutex_lock(mutexp); if (data->queued) bitforce_cmd1(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), "ZQX"); bitforce_cmd2(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), data->next_work_cmd, data->next_work_obs, data->next_work_obsz); mutex_unlock(mutexp); if (unlikely(strncasecmp(pdevbuf, "OK", 2))) { applog(LOG_WARNING, "%"PRIpreprv": Does not support work queue, disabling", bitforce->proc_repr); bitforce_change_mode(bitforce, BFP_WORK); } else { dbg_block_data(bitforce); data->queued = 1; } } // fallthru... case BFP_WORK: work->blk.nonce = 0xffffffff; } return true; } static void bitforce_change_mode(struct cgpu_info *bitforce, enum bitforce_proto proto) { struct bitforce_data *data = bitforce->device_data; if (data->proto == proto) return; if (data->proto == BFP_RANGE) { bitforce->nonces = 0xffffffff; bitforce->sleep_ms *= 5; data->sleep_ms_default *= 5; switch (proto) { case BFP_WORK: data->next_work_cmd = "ZDX"; break; case BFP_QUEUE: data->next_work_cmd = "ZNX"; default: ; } if (data->sc) { // "S|---------- MidState ----------||-DataTail-|E" data->next_work_ob[7] = 45; data->next_work_ob[8+32+12] = '\xAA'; data->next_work_obsz = 46; } else { // ">>>>>>>>|---------- MidState ----------||-DataTail-|>>>>>>>>" memset(&data->next_work_ob[8+32+12], '>', 8); data->next_work_obsz = 60; } } else if (proto == BFP_RANGE) { /* Split work up into 1/5th nonce ranges */ bitforce->nonces = 0x33333332; bitforce->sleep_ms /= 5; data->sleep_ms_default /= 5; data->next_work_cmd = "ZPX"; if (data->sc) { data->next_work_ob[7] = 53; data->next_work_obsz = 54; } else data->next_work_obsz = 68; } data->proto = proto; bitforce->kname = protonames[proto]; } static void bitforce_job_start(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fdDev = bitforce->device->device_fd; unsigned char *ob = data->next_work_obs; char pdevbuf[0x100]; struct timeval tv_now; data->result_busy_polled = 0; if (data->queued) { uint32_t delay; // get_results collected more accurate job start time mt_job_transition(thr); job_start_complete(thr); data->queued = 0; delay = (uint32_t)bitforce->sleep_ms * 1000; if (unlikely(data->already_have_results)) delay = 0; timer_set_delay(&thr->tv_morework, &bitforce->work_start_tv, delay); return; } if (!fdDev) goto commerr; re_send: mutex_lock(mutexp); bitforce_cmd2(fdDev, data->xlink_id, pdevbuf, sizeof(pdevbuf), data->next_work_cmd, ob, data->next_work_obsz); if (!pdevbuf[0] || !strncasecmp(pdevbuf, "B", 1)) { mutex_unlock(mutexp); cgtime(&tv_now); timer_set_delay(&thr->tv_poll, &tv_now, WORK_CHECK_INTERVAL_MS * 1000); data->poll_func = 1; return; } else if (unlikely(strncasecmp(pdevbuf, "OK", 2))) { mutex_unlock(mutexp); switch (data->proto) { case BFP_RANGE: applog(LOG_WARNING, "%"PRIpreprv": Does not support nonce range, disabling", bitforce->proc_repr); bitforce_change_mode(bitforce, BFP_WORK); goto re_send; case BFP_QUEUE: applog(LOG_WARNING, "%"PRIpreprv": Does not support work queue, disabling", bitforce->proc_repr); bitforce_change_mode(bitforce, BFP_WORK); goto re_send; default: ; } applog(LOG_ERR, "%"PRIpreprv": Error: Send work reports: %s", bitforce->proc_repr, pdevbuf); goto commerr; } mt_job_transition(thr); 
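	/* Job accepted ("OK"): release the device lock, record the submission time,
	 * and schedule the results poll sleep_ms from now; bitforce_job_get_results
	 * tunes sleep_ms over time to track the device's actual completion latency. */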
mutex_unlock(mutexp); dbg_block_data(bitforce); cgtime(&tv_now); bitforce->work_start_tv = tv_now; timer_set_delay(&thr->tv_morework, &tv_now, bitforce->sleep_ms * 1000); job_start_complete(thr); return; commerr: bitforce_comm_error(thr); job_start_abort(thr, true); } static char _discardedbuf[0x10]; static int bitforce_zox(struct thr_info *thr, const char *cmd) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fd = bitforce->device->device_fd; char *pdevbuf = &data->noncebuf[0]; int count; mutex_lock(mutexp); bitforce_cmd1(fd, data->xlink_id, pdevbuf, sizeof(data->noncebuf), cmd); if (!strncasecmp(pdevbuf, "INPROCESS:", 10)) BFgets(pdevbuf, sizeof(data->noncebuf), fd); if (!strncasecmp(pdevbuf, "COUNT:", 6)) { count = atoi(&pdevbuf[6]); size_t cls = strlen(pdevbuf); char *pmorebuf = &pdevbuf[cls]; size_t szleft = sizeof(data->noncebuf) - cls, sz; if (count && data->queued) cgtime(&bitforce->work_start_tv); while (true) { BFgets(pmorebuf, szleft, fd); if (!strncasecmp(pmorebuf, "OK", 2)) { pmorebuf[0] = '\0'; // process expects only results break; } sz = strlen(pmorebuf); if (!sz) { applog(LOG_ERR, "%"PRIpreprv": Timeout during %s", bitforce->proc_repr, cmd); break; } szleft -= sz; pmorebuf += sz; if (unlikely(szleft < BITFORCE_QRESULT_LINE_LEN)) { // Out of buffer space somehow :( applog(LOG_ERR, "%"PRIpreprv": Ran out of buffer space for results, discarding extra data", bitforce->proc_repr); pmorebuf = _discardedbuf; szleft = sizeof(_discardedbuf); } } } else count = -1; mutex_unlock(mutexp); return count; } static inline char *next_line(char *); static void bitforce_job_get_results(struct thr_info *thr, struct work *work) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; int fdDev = bitforce->device->device_fd; unsigned int delay_time_ms; struct timeval elapsed; struct timeval now; char *pdevbuf = &data->noncebuf[0]; bool stale; int count; cgtime(&now); timersub(&now, &bitforce->work_start_tv, &elapsed); bitforce->wait_ms = tv_to_ms(elapsed); bitforce->polling = true; if (!fdDev) goto commerr; stale = stale_work(work, true); if (unlikely(bitforce->wait_ms < bitforce->sleep_ms)) { // We're likely here because of a work restart // Since Bitforce cannot stop a work without losing results, only do it if the current job is finding stale shares // BFP_QUEUE does not support stopping work at all if (data->proto == BFP_QUEUE || !stale) { delay_time_ms = bitforce->sleep_ms - bitforce->wait_ms; timer_set_delay(&thr->tv_poll, &now, delay_time_ms * 1000); data->poll_func = 2; return; } } while (1) { if (data->already_have_results) { data->already_have_results = false; strcpy(pdevbuf, "COUNT:0"); count = 1; break; } const char *cmd = (data->proto == BFP_QUEUE) ? 
"ZOX" : "ZFX"; count = bitforce_zox(thr, cmd); cgtime(&now); timersub(&now, &bitforce->work_start_tv, &elapsed); if (elapsed.tv_sec >= BITFORCE_LONG_TIMEOUT_S) { applog(LOG_ERR, "%"PRIpreprv": took %lums - longer than %lums", bitforce->proc_repr, tv_to_ms(elapsed), (unsigned long)BITFORCE_LONG_TIMEOUT_MS); goto out; } if (count > 0) { // Check that queue results match the current work // Also, if there are results from the next work, short-circuit this wait unsigned char midstate[32], datatail[12]; char *p; int i; p = pdevbuf; for (i = 0; i < count; ++i) { p = next_line(p); hex2bin(midstate, p, 32); hex2bin(datatail, &p[65], 12); if (!(memcmp(work->midstate, midstate, 32) || memcmp(&work->data[64], datatail, 12))) break; } if (i == count) { // Didn't find the one we're waiting on // Must be extra stuff in the queue results char xmid[65]; char xdt[25]; bin2hex(xmid, work->midstate, 32); bin2hex(xdt, &work->data[64], 12); applog(LOG_WARNING, "%"PRIpreprv": Found extra garbage in queue results: %s", bitforce->proc_repr, pdevbuf); applog(LOG_WARNING, "%"PRIpreprv": ...while waiting on: %s,%s", bitforce->proc_repr, xmid, xdt); count = 0; } else if (i == count - 1) // Last one found is what we're looking for {} else // We finished the next job too! data->already_have_results = true; } if (!count) goto noqr; if (pdevbuf[0] && strncasecmp(pdevbuf, "B", 1)) /* BFL does not respond during throttling */ break; data->result_busy_polled = bitforce->wait_ms; if (stale && data->proto != BFP_QUEUE) { applog(LOG_NOTICE, "%"PRIpreprv": Abandoning stale search to restart", bitforce->proc_repr); goto out; } noqr: data->result_busy_polled = bitforce->wait_ms; /* if BFL is throttling, no point checking so quickly */ delay_time_ms = (pdevbuf[0] ? BITFORCE_CHECK_INTERVAL_MS : 2 * WORK_CHECK_INTERVAL_MS); timer_set_delay(&thr->tv_poll, &now, delay_time_ms * 1000); data->poll_func = 2; return; } if (count < 0 && pdevbuf[0] == 'N') count = strncasecmp(pdevbuf, "NONCE-FOUND", 11) ? 1 : 0; // At this point, 'count' is: // negative, in case of some kind of error // zero, if NO-NONCE (FPGA either completed with no results, or rebooted) // positive, if at least one job completed successfully if (elapsed.tv_sec > BITFORCE_TIMEOUT_S) { applog(LOG_ERR, "%"PRIpreprv": took %lums - longer than %lums", bitforce->proc_repr, tv_to_ms(elapsed), (unsigned long)BITFORCE_TIMEOUT_MS); dev_error(bitforce, REASON_DEV_OVER_HEAT); inc_hw_errors_only(thr); /* If the device truly throttled, it didn't process the job and there * are no results. But check first, just in case we're wrong about it * throttling. */ if (count > 0) goto out; } else if (count >= 0) {/* Hashing complete (NONCE-FOUND or NO-NONCE) */ /* Simple timing adjustment. Allow a few polls to cope with * OS timer delays being variably reliable. wait_ms will * always equal sleep_ms when we've waited greater than or * equal to the result return time.*/ delay_time_ms = bitforce->sleep_ms; if (!data->result_busy_polled) { // No busy polls before results received if (bitforce->wait_ms > delay_time_ms + (WORK_CHECK_INTERVAL_MS * 8)) // ... 
due to poll being rather late; ignore it as an anomaly applog(LOG_DEBUG, "%"PRIpreprv": Got results on first poll after %ums, later than scheduled %ums (ignoring)", bitforce->proc_repr, bitforce->wait_ms, delay_time_ms); else if (bitforce->sleep_ms > data->sleep_ms_default + (BITFORCE_CHECK_INTERVAL_MS * 0x20)) { applog(LOG_DEBUG, "%"PRIpreprv": Got results on first poll after %ums, on delayed schedule %ums; Wait time changed to: %ums (default sch)", bitforce->proc_repr, bitforce->wait_ms, delay_time_ms, data->sleep_ms_default); bitforce->sleep_ms = data->sleep_ms_default; } else { applog(LOG_DEBUG, "%"PRIpreprv": Got results on first poll after %ums, on default schedule %ums; Wait time changed to: %ums (check interval)", bitforce->proc_repr, bitforce->wait_ms, delay_time_ms, BITFORCE_CHECK_INTERVAL_MS); bitforce->sleep_ms = BITFORCE_CHECK_INTERVAL_MS; } } else { if (data->result_busy_polled - bitforce->sleep_ms > WORK_CHECK_INTERVAL_MS) { bitforce->sleep_ms = data->result_busy_polled - (WORK_CHECK_INTERVAL_MS / 2); applog(LOG_DEBUG, "%"PRIpreprv": Got results on Nth poll after %ums (busy poll at %ums, sch'd %ums); Wait time changed to: %ums", bitforce->proc_repr, bitforce->wait_ms, data->result_busy_polled, delay_time_ms, bitforce->sleep_ms); } else applog(LOG_DEBUG, "%"PRIpreprv": Got results on Nth poll after %ums (busy poll at %ums, sch'd %ums); Wait time unchanged", bitforce->proc_repr, bitforce->wait_ms, data->result_busy_polled, delay_time_ms); } /* Work out the average time taken. Float for calculation, uint for display */ bitforce->avg_wait_f += (tv_to_ms(elapsed) - bitforce->avg_wait_f) / TIME_AVG_CONSTANT; bitforce->avg_wait_d = (unsigned int) (bitforce->avg_wait_f + 0.5); } applog(LOG_DEBUG, "%"PRIpreprv": waited %dms until %s", bitforce->proc_repr, bitforce->wait_ms, pdevbuf); if (count < 0 && strncasecmp(pdevbuf, "I", 1)) { inc_hw_errors_only(thr); applog(LOG_WARNING, "%"PRIpreprv": Error: Get result reports: %s", bitforce->proc_repr, pdevbuf); bitforce_clear_buffer(bitforce); } out: bitforce->polling = false; job_results_fetched(thr); return; commerr: bitforce_comm_error(thr); goto out; } static void bitforce_process_result_nonces(struct thr_info *thr, struct work *work, char *pnoncebuf) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; uint32_t nonce; while (1) { hex2bin((void*)&nonce, pnoncebuf, 4); nonce = be32toh(nonce); if (unlikely(data->proto == BFP_RANGE && (nonce >= work->blk.nonce || /* FIXME: blk.nonce is probably moved on quite a bit now! 
*/ (work->blk.nonce > 0 && nonce < work->blk.nonce - bitforce->nonces - 1)))) { applog(LOG_WARNING, "%"PRIpreprv": Disabling broken nonce range support", bitforce->proc_repr); bitforce_change_mode(bitforce, BFP_WORK); } submit_nonce(thr, work, nonce); if (strncmp(&pnoncebuf[8], ",", 1)) break; pnoncebuf += 9; } } static bool bitforce_process_qresult_line_i(struct thr_info *thr, char *midstate, char *datatail, char *buf, struct work *work) { if (!work) return false; if (memcmp(work->midstate, midstate, 32)) return false; if (memcmp(&work->data[64], datatail, 12)) return false; char *end; if (strtol(&buf[90], &end, 10)) bitforce_process_result_nonces(thr, work, &end[1]); return true; } static void bitforce_process_qresult_line(struct thr_info *thr, char *buf, struct work *work) { struct cgpu_info *bitforce = thr->cgpu; char midstate[32], datatail[12]; hex2bin((void*)midstate, buf, 32); hex2bin((void*)datatail, &buf[65], 12); if (!( bitforce_process_qresult_line_i(thr, midstate, datatail, buf, work) || bitforce_process_qresult_line_i(thr, midstate, datatail, buf, thr->work) || bitforce_process_qresult_line_i(thr, midstate, datatail, buf, thr->prev_work) || bitforce_process_qresult_line_i(thr, midstate, datatail, buf, thr->next_work) )) { applog(LOG_ERR, "%"PRIpreprv": Failed to find work for queued results", bitforce->proc_repr); inc_hw_errors_only(thr); } } static inline char *next_line(char *in) { while (in[0] && (in++)[0] != '\n') {} return in; } static int64_t bitforce_job_process_results(struct thr_info *thr, struct work *work, __maybe_unused bool stopping) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; char *pnoncebuf = &data->noncebuf[0]; int count; if (!strncasecmp(pnoncebuf, "NO-", 3)) return bitforce->nonces; /* No valid nonce found */ if (!strncasecmp(pnoncebuf, "NONCE-FOUND", 11)) { bitforce_process_result_nonces(thr, work, &pnoncebuf[12]); count = 1; } else if (!strncasecmp(pnoncebuf, "COUNT:", 6)) { count = 0; pnoncebuf = next_line(pnoncebuf); while (pnoncebuf[0]) { bitforce_process_qresult_line(thr, pnoncebuf, work); ++count; pnoncebuf = next_line(pnoncebuf); } } else return 0; // FIXME: This might have changed in the meantime (new job start, or broken) return bitforce->nonces * count; } static void bitforce_shutdown(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; int *p_fdDev = &bitforce->device->device_fd; BFclose(*p_fdDev); *p_fdDev = 0; } static void biforce_thread_enable(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; bitforce_reinit(bitforce); } static bool bitforce_get_stats(struct cgpu_info *bitforce) { struct bitforce_proc_data *procdata = bitforce->thr[0]->cgpu_data; if (!procdata->handles_board) return true; return bitforce_get_temp(bitforce); } static bool bitforce_identify(struct cgpu_info *bitforce) { bitforce->flash_led = true; return true; } static bool bitforce_thread_init(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; unsigned int wait; struct bitforce_data *data; struct bitforce_proc_data *procdata; struct bitforce_init_data *initdata = bitforce->device_data; bool sc = initdata->sc; int xlink_id = 0, boardno = 0; struct bitforce_proc_data *first_on_this_board; char buf[100]; int fd = bitforce->device_fd; for ( ; bitforce; bitforce = bitforce->next_proc) { thr = bitforce->thr[0]; if (unlikely(xlink_id > 30)) { applog(LOG_ERR, "%"PRIpreprv": Failed to find XLINK address", bitforce->proc_repr); dev_error(bitforce, REASON_THREAD_FAIL_INIT); bitforce->reinit_backoff = 
1e10; continue; } bitforce->sleep_ms = BITFORCE_SLEEP_MS; bitforce->device_data = data = malloc(sizeof(*data)); *data = (struct bitforce_data){ .xlink_id = xlink_id, .next_work_ob = ">>>>>>>>|---------- MidState ----------||-DataTail-||Nonces|>>>>>>>>", .proto = BFP_RANGE, .sc = sc, .sleep_ms_default = BITFORCE_SLEEP_MS, .parallel = abs(initdata->parallels[boardno]), .parallel_protocol = (initdata->parallels[boardno] != -1), }; thr->cgpu_data = procdata = malloc(sizeof(*procdata)); *procdata = (struct bitforce_proc_data){ .handles_board = true, .cgpu = bitforce, }; if (sc) { // ".......S|---------- MidState ----------||-DataTail-||Nonces|E" data->next_work_ob[8+32+12+8] = '\xAA'; data->next_work_obs = &data->next_work_ob[7]; if (bitforce->drv == &bitforce_queue_api) { bitforce_change_mode(bitforce, data->parallel_protocol ? BFP_PQUEUE : BFP_BQUEUE); bitforce->sleep_ms = data->sleep_ms_default = 100; timer_set_delay_from_now(&thr->tv_poll, 0); data->queued_max = data->parallel * 2; if (data->queued_max < BITFORCE_MIN_QUEUED_MAX) data->queued_max = BITFORCE_MIN_QUEUED_MAX; if (data->queued_max > BITFORCE_MAX_QUEUED_MAX) data->queued_max = BITFORCE_MAX_QUEUED_MAX; } else bitforce_change_mode(bitforce, BFP_QUEUE); // Clear job queue to start fresh; ignore response bitforce_cmd1(fd, data->xlink_id, buf, sizeof(buf), "ZQX"); } else { data->next_work_obs = &data->next_work_ob[0]; // Unconditionally change away from cold-initialized BFP_RANGE, to allow for setting up other variables bitforce_change_mode(bitforce, BFP_WORK); /* Initially enable support for nonce range and disable it later if it * fails */ if (opt_bfl_noncerange) bitforce_change_mode(bitforce, BFP_RANGE); } bitforce->status = LIFE_INIT2; first_on_this_board = procdata; for (int proc = 1; proc < data->parallel; ++proc) { bitforce = bitforce->next_proc; assert(bitforce); thr = bitforce->thr[0]; thr->queue_full = true; thr->cgpu_data = procdata = malloc(sizeof(*procdata)); *procdata = *first_on_this_board; procdata->handles_board = false; procdata->cgpu = bitforce; bitforce->device_data = data; bitforce->status = LIFE_INIT2; bitforce->kname = first_on_this_board->cgpu->kname; } applog(LOG_DEBUG, "%s: Board %d: %"PRIpreprv"-%"PRIpreprv, bitforce->dev_repr, boardno, first_on_this_board->cgpu->proc_repr, bitforce->proc_repr); ++boardno; while (xlink_id < 31 && !(initdata->devmask & (1 << ++xlink_id))) {} } // NOTE: This doesn't restore the first processor, but it does get us the last one; this is sufficient for the delay debug and start of the next loop below bitforce = thr->cgpu; free(initdata->parallels); free(initdata); /* Pause each new thread at least 100ms between initialising * so the devices aren't making calls all at the same time. 
*/ wait = thr->id * MAX_START_DELAY_MS; applog(LOG_DEBUG, "%s: Delaying start by %dms", bitforce->dev_repr, wait / 1000); cgsleep_ms(wait); if (sc) { // Clear results queue last, to start fresh; ignore response for (bitforce = bitforce->device; bitforce; bitforce = bitforce->next_proc) bitforce_zox(thr, "ZOX"); } return true; } #ifdef HAVE_CURSES static void bitforce_tui_wlogprint_choices(struct cgpu_info *cgpu) { struct bitforce_data *data = cgpu->device_data; if (data->supports_fanspeed) wlogprint("[F]an control "); } static const char *bitforce_tui_handle_choice(struct cgpu_info *cgpu, int input) { struct bitforce_data *data = cgpu->device_data; pthread_mutex_t *mutexp; int fd; static char replybuf[0x100]; if (!data->supports_fanspeed) return NULL; switch (input) { case 'f': case 'F': { int fanspeed; char *intvar; intvar = curses_input("Set fan speed (range 0-5 for low to fast or 9 for auto)"); if (!intvar) return "Invalid fan speed\n"; fanspeed = atoi(intvar); free(intvar); if ((fanspeed < 0 || fanspeed > 5) && fanspeed != 9) return "Invalid fan speed\n"; char cmd[4] = "Z0X"; cmd[1] += fanspeed; mutexp = &cgpu->device->device_mutex; mutex_lock(mutexp); fd = cgpu->device->device_fd; bitforce_cmd1(fd, data->xlink_id, replybuf, sizeof(replybuf), cmd); mutex_unlock(mutexp); return replybuf; } } return NULL; } static void bitforce_wlogprint_status(struct cgpu_info *cgpu) { struct bitforce_data *data = cgpu->device_data; if (data->temp[0] > 0 && data->temp[1] > 0) wlogprint("Temperatures: %4.1fC %4.1fC\n", data->temp[0], data->temp[1]); if (data->volts_count) { // -> "NNN.xxx / NNN.xxx / NNN.xxx" size_t sz = (data->volts_count * 10) + 1; char buf[sz]; char *s = buf; int rv = 0; for (int i = 0; i < data->volts_count; ++i) { long v = data->volts[i]; _SNP("%ld.%03d / ", v / 1000, (int)(v % 1000)); } if (rv >= 3 && s[-2] == '/') s[-3] = '\0'; wlogprint("Voltages: %s\n", buf); } } #endif static struct api_data *bitforce_drv_stats(struct cgpu_info *cgpu) { struct bitforce_data *data = cgpu->device_data; struct api_data *root = NULL; // Warning, access to these is not locked - but we don't really // care since hashing performance is way more important than // locking access to displaying API debug 'stats' // If locking becomes an issue for any of them, use copy_data=true also root = api_add_uint(root, "Sleep Time", &(cgpu->sleep_ms), false); if (data->proto != BFP_BQUEUE && data->proto != BFP_PQUEUE) root = api_add_uint(root, "Avg Wait", &(cgpu->avg_wait_d), false); if (data->temp[0] > 0 && data->temp[1] > 0) { root = api_add_temp(root, "Temperature0", &(data->temp[0]), false); root = api_add_temp(root, "Temperature1", &(data->temp[1]), false); } for (int i = 0; i < data->volts_count; ++i) { float voltage = data->volts[i]; char key[] = "VoltageNN"; snprintf(&key[7], 3, "%d", i); voltage /= 1e3; root = api_add_volts(root, key, &voltage, true); } return root; } void bitforce_poll(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; int poll = data->poll_func; thr->tv_poll.tv_sec = -1; data->poll_func = 0; switch (poll) { case 1: bitforce_job_start(thr); break; case 2: bitforce_job_get_results(thr, thr->work); break; default: applog(LOG_ERR, "%"PRIpreprv": Unexpected poll from device API!", thr->cgpu->proc_repr); } } static char *bitforce_set_device(struct cgpu_info *proc, char *option, char *setting, char *replybuf) { struct bitforce_data *data = proc->device_data; pthread_mutex_t *mutexp = &proc->device->device_mutex; int fd; if 
(!strcasecmp(option, "help")) { sprintf(replybuf, "fanmode: range 0-5 (low to fast) or 9 (auto)"); return replybuf; } if (!strcasecmp(option, "fanmode")) { if (!data->supports_fanspeed) { sprintf(replybuf, "fanmode not supported"); return replybuf; } if (!setting || !*setting) { sprintf(replybuf, "missing fanmode setting"); return replybuf; } if (setting[1] || ((setting[0] < '0' || setting[0] > '5') && setting[0] != '9')) { sprintf(replybuf, "invalid fanmode setting"); return replybuf; } char cmd[4] = "Z5X"; cmd[1] = setting[0]; mutex_lock(mutexp); fd = proc->device->device_fd; bitforce_cmd1(fd, data->xlink_id, replybuf, 256, cmd); mutex_unlock(mutexp); return replybuf; } if (!strcasecmp(option, "_cmd1")) { mutex_lock(mutexp); fd = proc->device->device_fd; bitforce_cmd1b(fd, data->xlink_id, replybuf, 8000, setting, strlen(setting)); mutex_unlock(mutexp); return replybuf; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } struct device_drv bitforce_drv = { .dname = "bitforce", .name = "BFL", .lowl_match = bitforce_lowl_match, .lowl_probe = bitforce_lowl_probe, #ifdef HAVE_CURSES .proc_wlogprint_status = bitforce_wlogprint_status, .proc_tui_wlogprint_choices = bitforce_tui_wlogprint_choices, .proc_tui_handle_choice = bitforce_tui_handle_choice, #endif .get_api_stats = bitforce_drv_stats, .minerloop = minerloop_async, .reinit_device = bitforce_reinit, .get_stats = bitforce_get_stats, .set_device = bitforce_set_device, .identify_device = bitforce_identify, .thread_prepare = bitforce_thread_prepare, .thread_init = bitforce_thread_init, .job_prepare = bitforce_job_prepare, .job_start = bitforce_job_start, .job_get_results = bitforce_job_get_results, .poll = bitforce_poll, .job_process_results = bitforce_job_process_results, .thread_shutdown = bitforce_shutdown, .thread_enable = biforce_thread_enable }; static inline void bitforce_set_queue_full(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; thr->queue_full = (data->queued + data->ready_to_queue >= data->queued_max) || (data->ready_to_queue >= BITFORCE_MAX_BQUEUE_AT_ONCE); } static bool bitforce_send_queue(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; pthread_mutex_t *mutexp = &bitforce->device->device_mutex; int fd = bitforce->device->device_fd; struct work *work; if (unlikely(!(fd && data->ready_to_queue))) return false; char buf[0x100]; int queued_ok; size_t qjs_sz = (32 + 12 + 2); size_t qjp_sz = 4 + (qjs_sz * data->ready_to_queue); uint8_t qjp[qjp_sz], *qjs; qjp[0] = qjp_sz - 1; qjp[1] = 0xc1; qjp[2] = data->ready_to_queue; qjp[qjp_sz - 1] = 0xfe; qjs = &qjp[qjp_sz - 1]; work = thr->work_list->prev; for (int i = data->ready_to_queue; i > 0; --i, work = work->prev) { *(--qjs) = 0xaa; memcpy(qjs -= 12, work->data + 64, 12); memcpy(qjs -= 32, work->midstate, 32); *(--qjs) = 45; } retry: mutex_lock(mutexp); if (data->missing_zwx) bitforce_cmd2(fd, data->xlink_id, buf, sizeof(buf), "ZNX", &qjp[3], qjp_sz - 4); else bitforce_cmd2(fd, data->xlink_id, buf, sizeof(buf), "ZWX", qjp, qjp_sz); mutex_unlock(mutexp); if (!strncasecmp(buf, "ERR:QUEUE", 9)) { // Queue full :( applog(LOG_DEBUG, "%"PRIpreprv": Device queue full while attempting to append %d jobs (queued<=%d)", bitforce->proc_repr, data->ready_to_queue, data->queued); thr->queue_full = true; return false; } if (strncasecmp(buf, "OK:QUEUED", 9)) { if ((!strncasecmp(buf, "ERROR: UNKNOWN", 11)) && !data->missing_zwx) { applog(LOG_DEBUG, 
"%"PRIpreprv": Missing ZWX command, trying ZNX", bitforce->proc_repr); data->missing_zwx = true; goto retry; } applog(LOG_DEBUG, "%"PRIpreprv": Unexpected error attempting to append %d jobs (queued<=%d): %s", bitforce->proc_repr, data->ready_to_queue, data->queued, buf); return false; } if (!data->queued) cgtime(&data->tv_hashmeter_start); if (data->missing_zwx) queued_ok = 1; else queued_ok = atoi(&buf[9]); data->queued += queued_ok; applog(LOG_DEBUG, "%"PRIpreprv": Successfully queued %d/%d jobs on device (queued<=%d)", bitforce->proc_repr, queued_ok, data->ready_to_queue, data->queued); data->ready_to_queue -= queued_ok; if (!data->missing_zwx) thr->queue_full = data->ready_to_queue; data->just_flushed = false; data->want_to_send_queue = false; return true; } void work_list_del(struct work **head, struct work *work) { DL_DELETE(*head, work); free_work(work); } static bool bitforce_queue_do_results(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; int fd = bitforce->device->device_fd; int count; int fcount; char *noncebuf, *buf, *end; unsigned char midstate[32], datatail[12]; struct work *work, *tmpwork, *thiswork; struct timeval tv_now, tv_elapsed; long chipno = 0; // Initialized value is used for non-parallelized boards struct cgpu_info *chip_cgpu; struct thr_info *chip_thr; int counts[data->parallel]; if (unlikely(!fd)) return false; again: noncebuf = &data->noncebuf[0]; count = bitforce_zox(thr, "ZOX"); if (unlikely(count < 0)) { applog(LOG_ERR, "%"PRIpreprv": Received unexpected queue result response: %s", bitforce->proc_repr, noncebuf); inc_hw_errors_only(thr); return false; } applog(LOG_DEBUG, "%"PRIpreprv": Received %d queue results on poll (max=%d)", bitforce->proc_repr, count, (int)BITFORCE_MAX_QRESULTS); if (!count) return true; fcount = 0; for (int i = 0; i < data->parallel; ++i) counts[i] = 0; noncebuf = next_line(noncebuf); while ((buf = noncebuf)[0]) { if ( (noncebuf = next_line(buf)) ) noncebuf[-1] = '\0'; if (strlen(buf) <= 90) { applog(LOG_ERR, "%"PRIpreprv": Gibberish within queue results: %s", bitforce->proc_repr, buf); continue; } hex2bin(midstate, buf, 32); hex2bin(datatail, &buf[65], 12); thiswork = NULL; DL_FOREACH(thr->work_list, work) { if (unlikely(memcmp(work->midstate, midstate, 32))) continue; if (unlikely(memcmp(&work->data[64], datatail, 12))) continue; thiswork = work; break; } end = &buf[89]; chip_cgpu = bitforce; if (data->parallel_protocol) { chipno = strtol(&end[1], &end, 16); if (chipno >= data->parallel) { applog(LOG_ERR, "%"PRIpreprv": Chip number out of range for queue result: %s", chip_cgpu->proc_repr, buf); chipno = 0; } for (int i = 0; i < chipno; ++i) chip_cgpu = chip_cgpu->next_proc; } chip_thr = chip_cgpu->thr[0]; applog(LOG_DEBUG, "%"PRIpreprv": Queue result: %s", chip_cgpu->proc_repr, buf); if (unlikely(!thiswork)) { applog(LOG_ERR, "%"PRIpreprv": Failed to find work for queue results: %s", chip_cgpu->proc_repr, buf); inc_hw_errors_only(chip_thr); goto next_qline; } if (unlikely(!end[0])) { applog(LOG_ERR, "%"PRIpreprv": Missing nonce count in queue results: %s", chip_cgpu->proc_repr, buf); goto finishresult; } if (strtol(&end[1], &end, 10)) { if (unlikely(!end[0])) { applog(LOG_ERR, "%"PRIpreprv": Missing nonces in queue results: %s", chip_cgpu->proc_repr, buf); goto finishresult; } bitforce_process_result_nonces(chip_thr, work, &end[1]); } ++fcount; ++counts[chipno]; finishresult: if (data->parallel == 1) { // Queue results are in order, so anything queued prior this is 
lost // Delete all queued work up to, and including, this one DL_FOREACH_SAFE(thr->work_list, work, tmpwork) { work_list_del(&thr->work_list, work); --data->queued; if (work == thiswork) break; } } else { // Parallel processors means the results might not be in order // This could leak if jobs get lost, hence the sanity checks using "ZqX" work_list_del(&thr->work_list, thiswork); --data->queued; } next_qline: (void)0; } bitforce_set_queue_full(thr); if (count >= BITFORCE_MAX_QRESULTS) goto again; if (data->parallel == 1 && ( (fcount < BITFORCE_GOAL_QRESULTS && bitforce->sleep_ms < BITFORCE_MAX_QRESULT_WAIT && data->queued > 1) || (fcount > BITFORCE_GOAL_QRESULTS && bitforce->sleep_ms > BITFORCE_MIN_QRESULT_WAIT) )) { unsigned int old_sleep_ms = bitforce->sleep_ms; bitforce->sleep_ms = (uint32_t)bitforce->sleep_ms * BITFORCE_GOAL_QRESULTS / (fcount ?: 1); if (bitforce->sleep_ms > BITFORCE_MAX_QRESULT_WAIT) bitforce->sleep_ms = BITFORCE_MAX_QRESULT_WAIT; if (bitforce->sleep_ms < BITFORCE_MIN_QRESULT_WAIT) bitforce->sleep_ms = BITFORCE_MIN_QRESULT_WAIT; applog(LOG_DEBUG, "%"PRIpreprv": Received %d queue results after %ums; Wait time changed to: %ums (queued<=%d)", bitforce->proc_repr, fcount, old_sleep_ms, bitforce->sleep_ms, data->queued); } else applog(LOG_DEBUG, "%"PRIpreprv": Received %d queue results after %ums; Wait time unchanged (queued<=%d)", bitforce->proc_repr, fcount, bitforce->sleep_ms, data->queued); cgtime(&tv_now); timersub(&tv_now, &data->tv_hashmeter_start, &tv_elapsed); chip_cgpu = bitforce; for (int i = 0; i < data->parallel; ++i, (chip_cgpu = chip_cgpu->next_proc)) { chip_thr = chip_cgpu->thr[0]; hashes_done(chip_thr, (uint64_t)bitforce->nonces * counts[i], &tv_elapsed, NULL); } data->tv_hashmeter_start = tv_now; return true; } static bool bitforce_queue_append(struct thr_info *thr, struct work *work) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; bool rv, ndq; bitforce_set_queue_full(thr); rv = !thr->queue_full; if (rv) { DL_APPEND(thr->work_list, work); ++data->ready_to_queue; applog(LOG_DEBUG, "%"PRIpreprv": Appending to driver queue (max=%u, ready=%d, queued<=%d)", bitforce->proc_repr, (unsigned)data->queued_max, data->ready_to_queue, data->queued); bitforce_set_queue_full(thr); } else if (!data->ready_to_queue) return rv; ndq = !data->queued; if ((ndq) // Device is idle || (data->ready_to_queue >= BITFORCE_MAX_BQUEUE_AT_ONCE) // ...or 5 items ready to go || (thr->queue_full) // ...or done filling queue || (data->just_flushed) // ...or queue was just flushed (only remaining job is partly done already) || (data->missing_zwx) // ...or device can only queue one at a time ) { if (!bitforce_send_queue(thr)) { // Problem sending queue, retry again in a few seconds applog(LOG_ERR, "%"PRIpreprv": Failed to send queue", bitforce->proc_repr); inc_hw_errors_only(thr); data->want_to_send_queue = true; } } return rv; } struct _jobinfo { uint8_t key[32+12]; int instances; UT_hash_handle hh; }; static void bitforce_queue_flush(struct thr_info *thr) { struct bitforce_proc_data *procdata = thr->cgpu_data; if (!procdata->handles_board) return; struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; char *buf = &data->noncebuf[0], *buf2 = NULL; const char *cmd = "ZqX"; unsigned flushed; struct _jobinfo *processing = NULL, *item, *this; if (data->parallel == 1) // Pre-parallelization neither needs nor supports "ZqX" cmd = "ZQX"; // TODO: Call "ZQX" most of the time: don't need to do sanity checks so often 
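	/* The reply is parsed below: "OK:FLUSHED<n>" reports the flush count directly;
	 * a "ZqX" "COUNT:" listing also carries a "FLUSHED:<n>" field plus the jobs
	 * still in progress; a bare "OK" means the firmware gave no flush count. */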
bitforce_zox(thr, cmd); if (!strncasecmp(buf, "OK:FLUSHED", 10)) flushed = atoi(&buf[10]); else if ((!strncasecmp(buf, "COUNT:", 6)) && (buf2 = strstr(buf, "FLUSHED:")) ) { flushed = atoi(&buf2[8]); buf2 = next_line(buf2); } else if (!strncasecmp(buf, "OK", 2)) { applog(LOG_DEBUG, "%"PRIpreprv": Didn't report flush count", bitforce->proc_repr); thr->queue_full = false; flushed = 0; } else { applog(LOG_DEBUG, "%"PRIpreprv": Failed to flush device queue: %s", bitforce->proc_repr, buf); flushed = 0; } data->queued -= flushed; applog(LOG_DEBUG, "%"PRIpreprv": Flushed %u jobs from device and %d from driver (queued<=%d)", bitforce->proc_repr, flushed, data->ready_to_queue, data->queued); flushed += data->ready_to_queue; data->ready_to_queue = 0; while (flushed--) work_list_del(&thr->work_list, thr->work_list->prev); bitforce_set_queue_full(thr); data->just_flushed = true; data->want_to_send_queue = false; // "ZqX" returns jobs in progress, allowing us to sanity check // NOTE: Must process buffer into hash table BEFORE calling bitforce_queue_do_results, which clobbers it // NOTE: Must do actual sanity check AFTER calling bitforce_queue_do_results, to ensure we don't delete completed jobs if (buf2) { // First, turn buf2 into a hash for ( ; buf2[0]; buf2 = next_line(buf2)) { this = malloc(sizeof(*this)); hex2bin(&this->key[ 0], &buf2[ 0], 32); hex2bin(&this->key[32], &buf2[65], 12); HASH_FIND(hh, processing, &this->key[0], sizeof(this->key), item); if (likely(!item)) { this->instances = 1; HASH_ADD(hh, processing, key, sizeof(this->key), this); } else { // This should really only happen in testing/benchmarking... ++item->instances; free(this); } } } bitforce_queue_do_results(thr); if (buf2) { struct work *work, *tmp; uint8_t key[32+12]; // Now iterate over the work_list and delete anything not in the hash DL_FOREACH_SAFE(thr->work_list, work, tmp) { memcpy(&key[ 0], work->midstate, 32); memcpy(&key[32], &work->data[64], 12); HASH_FIND(hh, processing, &key[0], sizeof(key), item); if (unlikely(!item)) { char hex[89]; bin2hex(hex, key, 32+12); applog(LOG_WARNING, "%"PRIpreprv": Sanity check: Device is missing queued job! 
%s", bitforce->proc_repr, hex); work_list_del(&thr->work_list, work); continue; } if (likely(!--item->instances)) { HASH_DEL(processing, item); free(item); } } if (unlikely( (flushed = HASH_COUNT(processing)) )) { //applog(LOG_WARNING, "%"PRIpreprv": Sanity check: Device is working on %d unknown jobs!", bitforce->proc_repr, flushed); // FIXME: Probably these were jobs finished after ZqX, included in the result check we just did // NOTE: We need to do that result check first to avoid deleting work_list items for things just solved HASH_ITER(hh, processing, item, this) { HASH_DEL(processing, item); free(item); } } } } static void bitforce_queue_poll(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu; struct bitforce_data *data = bitforce->device_data; unsigned long sleep_us; if (data->queued) bitforce_queue_do_results(thr); sleep_us = (unsigned long)bitforce->sleep_ms * 1000; if (data->want_to_send_queue) if (!bitforce_send_queue(thr)) if (!data->queued) { applog(LOG_ERR, "%"PRIpreprv": Failed to send queue, and queue empty; retrying after 1 second", bitforce->proc_repr); inc_hw_errors_only(thr); sleep_us = 1000000; } timer_set_delay_from_now(&thr->tv_poll, sleep_us); } static void bitforce_queue_thread_deven(struct thr_info *thr) { struct cgpu_info *bitforce = thr->cgpu, *thisbf; struct bitforce_data *data = bitforce->device_data; struct thr_info *thisthr; for (thisbf = bitforce->device; thisbf && thisbf->device_data != data; thisbf = thisbf->next_proc) {} for ( ; thisbf && thisbf->device_data == data; thisbf = thisbf->next_proc) { thisthr = bitforce->thr[0]; thisthr->pause = thr->pause; thisbf->deven = bitforce->deven; } } static void bitforce_queue_thread_disable(struct thr_info *thr) { // Disable other threads sharing the same queue bitforce_queue_thread_deven(thr); } static void bitforce_queue_thread_enable(struct thr_info *thr) { // TODO: Maybe reinit? 
// Enable other threads sharing the same queue bitforce_queue_thread_deven(thr); } struct device_drv bitforce_queue_api = { .dname = "bitforce_queue", .name = "BFL", .lowl_probe_by_name_only = true, .lowl_match = bitforce_lowl_match, .lowl_probe = bitforce_lowl_probe, .minerloop = minerloop_queue, .reinit_device = bitforce_reinit, #ifdef HAVE_CURSES .proc_wlogprint_status = bitforce_wlogprint_status, .proc_tui_wlogprint_choices = bitforce_tui_wlogprint_choices, .proc_tui_handle_choice = bitforce_tui_handle_choice, #endif .get_api_stats = bitforce_drv_stats, .get_stats = bitforce_get_stats, .set_device = bitforce_set_device, .identify_device = bitforce_identify, .thread_prepare = bitforce_thread_prepare, .thread_init = bitforce_thread_init, .queue_append = bitforce_queue_append, .queue_flush = bitforce_queue_flush, .poll = bitforce_queue_poll, .thread_shutdown = bitforce_shutdown, .thread_disable = bitforce_queue_thread_disable, .thread_enable = bitforce_queue_thread_enable, }; bfgminer-bfgminer-3.10.0/driver-bitfury.c000066400000000000000000000523721226556647300203250ustar00rootroot00000000000000/* * Copyright 2013 bitfury * Copyright 2013 Anatoly Legkodymov * Copyright 2013 Luke Dashjr * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "config.h" #include #include "miner.h" #include #include #include #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "util.h" #include "spidevc.h" BFG_REGISTER_DRIVER(bitfury_drv) static char *bitfury_spi_port_config(struct cgpu_info *, char *, char *, char *); static int bitfury_autodetect() { RUNONCE(0); int chip_n; struct cgpu_info *bitfury_info; bitfury_info = calloc(1, sizeof(struct cgpu_info)); bitfury_info->drv = &bitfury_drv; bitfury_info->threads = 1; applog(LOG_INFO, "INFO: bitfury_detect"); spi_init(); if (!sys_spi) return 0; { struct bitfury_device dummy_bitfury = { .spi = sys_spi, }; drv_set_defaults(&bitfury_drv, bitfury_spi_port_config, &dummy_bitfury); } chip_n = libbitfury_detectChips1(sys_spi); if (!chip_n) { applog(LOG_WARNING, "No Bitfury chips detected!"); return 0; } else { applog(LOG_WARNING, "BITFURY: %d chips detected!", chip_n); } bitfury_info->procs = chip_n; add_cgpu(bitfury_info); return 1; } static void bitfury_detect(void) { noserial_detect_manual(&bitfury_drv, bitfury_autodetect); } static void *bitfury_just_io(struct bitfury_device * const bitfury) { struct spi_port * const spi = bitfury->spi; const int chip = bitfury->fasync; void *rv; spi_clear_buf(spi); spi_emit_break(spi); spi_emit_fasync(spi, chip); rv = spi_emit_data(spi, 0x3000, &bitfury->atrvec[0], 19 * 4); spi_txrx(spi); return rv; } static void bitfury_debug_nonce_array(const struct cgpu_info * const proc, const char *msg, const uint32_t * const inp) { const struct bitfury_device * const bitfury = proc->device_data; const int active = bitfury->active; char s[((1 + 8) * 0x10) + 1]; char *sp = s; for (int i = 0; i < 0x10; ++i) sp += sprintf(sp, "%c%08lx", (active == i) ? '>' : ' ', (unsigned long)bitfury_decnonce(inp[i])); applog(LOG_DEBUG, "%"PRIpreprv": %s%s (job=%08lx)", proc->proc_repr, msg, s, (unsigned long)inp[0x10]); } static bool bitfury_init_oldbuf(struct cgpu_info * const proc, const uint32_t *inp) { struct bitfury_device * const bitfury = proc->device_data; uint32_t * const oldbuf = &bitfury->oldbuf[0]; uint32_t * const buf = &bitfury->newbuf[0]; int i, differ, tried = 0; if (!inp) inp = bitfury_just_io(bitfury); tryagain: if (tried > 3) { applog(LOG_ERR, "%"PRIpreprv": %s: Giving up after %d tries", proc->proc_repr, __func__, tried); bitfury->desync_counter = 99; return false; } ++tried; memcpy(buf, inp, 0x10 * 4); inp = bitfury_just_io(bitfury); differ = -1; for (i = 0; i < 0x10; ++i) { if (inp[i] != buf[i]) { if (differ != -1) { applog(LOG_DEBUG, "%"PRIpreprv": %s: Second differ at %d; trying again", proc->proc_repr, __func__, i); goto tryagain; } differ = i; applog(LOG_DEBUG, "%"PRIpreprv": %s: Differ at %d", proc->proc_repr, __func__, i); if (tried > 3) break; } } if (-1 == differ) { applog(LOG_DEBUG, "%"PRIpreprv": %s: No differ found; trying again", proc->proc_repr, __func__); goto tryagain; } bitfury->active = differ; memcpy(&oldbuf[0], &inp[bitfury->active], 4 * (0x10 - bitfury->active)); memcpy(&oldbuf[0x10 - bitfury->active], &inp[0], 4 * bitfury->active); bitfury->oldjob = inp[0x10]; bitfury->desync_counter = 0; if (opt_debug) bitfury_debug_nonce_array(proc, "Init", inp); return true; } bool bitfury_init_chip(struct cgpu_info * const proc) { struct bitfury_device * const bitfury = proc->device_data; struct bitfury_payload payload = { .midstate = "\xf9\x9a\xf0\xd5\x72\x34\x41\xdc\x9e\x10\xd1\x1f\xeb\xcd\xe3\xf5" "\x52\xf1\x14\x63\x06\x14\xd1\x12\x15\x25\x39\xd1\x7d\x77\x5a\xfd", .m7 = 0xafbd0b42, .ntime = 0xb6c24563, .nbits 
= 0x6dfa4352, }; bitfury_payload_to_atrvec(bitfury->atrvec, &payload); return bitfury_init_oldbuf(proc, NULL); } static bool bitfury_init(struct thr_info *thr) { struct cgpu_info *proc; struct bitfury_device *bitfury; for (proc = thr->cgpu; proc; proc = proc->next_proc) { bitfury = proc->device_data = malloc(sizeof(struct bitfury_device)); *bitfury = (struct bitfury_device){ .spi = sys_spi, .fasync = proc->proc_id, }; bitfury_init_chip(proc); bitfury->osc6_bits = 50; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); } timer_set_now(&thr->tv_poll); return true; } void bitfury_disable(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; struct bitfury_device * const bitfury = proc->device_data; applog(LOG_DEBUG, "%"PRIpreprv": Shutting down chip (disable)", proc->proc_repr); bitfury_send_shutdown(bitfury->spi, bitfury->slot, bitfury->fasync); } void bitfury_enable(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; struct bitfury_device * const bitfury = proc->device_data; struct cgpu_info * const dev = proc->device; struct thr_info * const master_thr = dev->thr[0]; applog(LOG_DEBUG, "%"PRIpreprv": Reinitialising chip (enable)", proc->proc_repr); bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_chip(proc); if (!timer_isset(&master_thr->tv_poll)) timer_set_now(&master_thr->tv_poll); } void bitfury_shutdown(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu, *proc; struct bitfury_device *bitfury; applog(LOG_INFO, "INFO bitfury_shutdown"); for (proc = cgpu; proc; proc = proc->next_proc) { bitfury = proc->device_data; bitfury_send_shutdown(bitfury->spi, bitfury->slot, bitfury->fasync); } } bool bitfury_job_prepare(struct thr_info *thr, struct work *work, __maybe_unused uint64_t max_nonce) { struct cgpu_info * const proc = thr->cgpu; struct bitfury_device * const bitfury = proc->device_data; if (opt_debug) { char hex[153]; bin2hex(hex, &work->data[0], 76); applog(LOG_DEBUG, "%"PRIpreprv": Preparing work %s", proc->proc_repr, hex); } work_to_bitfury_payload(&bitfury->payload, work); bitfury_payload_to_atrvec(bitfury->atrvec, &bitfury->payload); work->blk.nonce = 0xffffffff; return true; } static bool fudge_nonce(struct work * const work, uint32_t *nonce_p) { static const uint32_t offsets[] = {0, 0xffc00000, 0xff800000, 0x02800000, 0x02C00000, 0x00400000}; uint32_t nonce; int i; if (unlikely(!work)) return false; for (i = 0; i < 6; ++i) { nonce = *nonce_p + offsets[i]; if (test_nonce(work, nonce, false)) { *nonce_p = nonce; return true; } } return false; } void bitfury_noop_job_start(struct thr_info __maybe_unused * const thr) { } // freq_stat->{mh,s} are allocated such that [osc6_min] is the first valid index and [0] falls outside the allocation void bitfury_init_freq_stat(struct freq_stat * const c, const int osc6_min, const int osc6_max) { const int osc6_values = (osc6_max + 1 - osc6_min); void * const p = calloc(osc6_values, (sizeof(*c->mh) + sizeof(*c->s))); c->mh = p - (sizeof(*c->mh) * osc6_min); c->s = p + (sizeof(*c->mh) * osc6_values) - (sizeof(*c->s) * osc6_min); c->osc6_min = osc6_min; c->osc6_max = osc6_max; } void bitfury_clean_freq_stat(struct freq_stat * const c) { free(&c->mh[c->osc6_min]); } #define HOP_DONE 600 typedef uint32_t bitfury_inp_t[0x11]; static int bitfury_select_freq(struct bitfury_device *bitfury, struct cgpu_info *proc) { int freq; int random; int i; bool all_done; struct freq_stat *c; c = &bitfury->chip_stat; if (c->best_done) { 
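		/* Calibration already settled on the best-performing oscillator setting, so
		 * keep using it.  Otherwise the code below hops between osc6_bits values
		 * until every setting has more than HOP_DONE seconds of samples, then
		 * locks in the one with the highest measured hash rate. */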
freq = c->best_osc; } else { random = (int)(bitfury->mhz * 1000.0) & 1; freq = (bitfury->osc6_bits == c->osc6_max) ? c->osc6_min : bitfury->osc6_bits + random; all_done = true; for (i = c->osc6_min; i <= c->osc6_max; ++i) if (c->s[i] <= HOP_DONE) { all_done = false; break; } if (all_done) { double mh_max = 0.0; for (i = c->osc6_min; i <= c->osc6_max; ++i) { const double mh_actual = c->mh[i] / c->s[i]; if (mh_max >= mh_actual) continue; mh_max = mh_actual; freq = i; } c->best_done = 1; c->best_osc = freq; applog(LOG_DEBUG, "%"PRIpreprv": best_osc = %d", proc->proc_repr, freq); } } applog(LOG_DEBUG, "%"PRIpreprv": Changing osc6_bits to %d", proc->proc_repr, freq); bitfury->osc6_bits = freq; bitfury_send_freq(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); return 0; } void bitfury_do_io(struct thr_info * const master_thr) { struct cgpu_info *proc; struct thr_info *thr; struct bitfury_device *bitfury; struct freq_stat *c; const uint32_t *inp; int n, i, j; bool newjob; uint32_t nonce; int n_chips = 0, lastchip = 0; struct spi_port *spi = NULL; bool should_be_running; struct timeval tv_now; uint32_t counter; struct timeval *tvp_stat; for (proc = master_thr->cgpu; proc; proc = proc->next_proc) ++n_chips; struct cgpu_info *procs[n_chips]; void *rxbuf[n_chips]; bitfury_inp_t rxbuf_copy[n_chips]; // NOTE: This code assumes: // 1) that chips on the same SPI bus are grouped together // 2) that chips are in sequential fasync order n_chips = 0; for (proc = master_thr->cgpu; proc; proc = proc->next_proc) { thr = proc->thr[0]; bitfury = proc->device_data; should_be_running = (proc->deven == DEV_ENABLED && !thr->pause); if (should_be_running || thr->_job_transition_in_progress) { if (spi != bitfury->spi) { if (spi) spi_txrx(spi); spi = bitfury->spi; spi_clear_buf(spi); spi_emit_break(spi); lastchip = 0; } procs[n_chips] = proc; spi_emit_fasync(spi, bitfury->fasync - lastchip); lastchip = bitfury->fasync; rxbuf[n_chips] = spi_emit_data(spi, 0x3000, &bitfury->atrvec[0], 19 * 4); ++n_chips; } else if (thr->work /* is currently running */ && thr->busy_state != TBS_STARTING_JOB) ;//FIXME: shutdown chip } if (!spi) { timer_unset(&master_thr->tv_poll); return; } timer_set_now(&tv_now); spi_txrx(spi); for (j = 0; j < n_chips; ++j) { memcpy(rxbuf_copy[j], rxbuf[j], 0x11 * 4); rxbuf[j] = rxbuf_copy[j]; } for (j = 0; j < n_chips; ++j) { proc = procs[j]; thr = proc->thr[0]; bitfury = proc->device_data; tvp_stat = &bitfury->tv_stat; c = &bitfury->chip_stat; uint32_t * const newbuf = &bitfury->newbuf[0]; uint32_t * const oldbuf = &bitfury->oldbuf[0]; inp = rxbuf[j]; if (unlikely(bitfury->desync_counter == 99)) { bitfury_init_oldbuf(proc, inp); goto out; } if (opt_debug) bitfury_debug_nonce_array(proc, "Read", inp); // To avoid dealing with wrap-around entirely, we rotate array so previous active uint32_t is at index 0 memcpy(&newbuf[0], &inp[bitfury->active], 4 * (0x10 - bitfury->active)); memcpy(&newbuf[0x10 - bitfury->active], &inp[0], 4 * bitfury->active); newjob = inp[0x10]; if (newbuf[0xf] != oldbuf[0xf]) { inc_hw_errors2(thr, NULL, NULL); if (unlikely(++bitfury->desync_counter >= 4)) { applog(LOG_WARNING, "%"PRIpreprv": Previous nonce mismatch (4th try), recalibrating", proc->proc_repr); bitfury_init_oldbuf(proc, inp); continue; } applog(LOG_DEBUG, "%"PRIpreprv": Previous nonce mismatch, ignoring response", proc->proc_repr); goto out; } else bitfury->desync_counter = 0; if (bitfury->oldjob != newjob && thr->next_work) { mt_job_transition(thr); // TODO: Delay morework until right before it's 
needed timer_set_now(&thr->tv_morework); job_start_complete(thr); } for (n = 0; newbuf[n] == oldbuf[n]; ++n) { if (unlikely(n >= 0xf)) { inc_hw_errors2(thr, NULL, NULL); applog(LOG_DEBUG, "%"PRIpreprv": Full result match, reinitialising", proc->proc_repr); bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury->desync_counter = 99; goto out; } } counter = bitfury_decnonce(newbuf[n]); if ((counter & 0xFFC00000) == 0xdf800000) { counter &= 0x003fffff; int32_t cycles = counter - bitfury->counter1; if (cycles < 0) cycles += 0x00400000; if (cycles & 0x00200000) { long long unsigned int period; double ns; struct timeval d_time; timersub(&(tv_now), &(bitfury->timer1), &d_time); period = timeval_to_us(&d_time) * 1000ULL; ns = (double)period / (double)(cycles); bitfury->mhz = 1.0 / ns * 65.0 * 1000.0; if (bitfury->mhz_best) { if (bitfury->mhz < bitfury->mhz_best / 2) { applog(LOG_WARNING, "%"PRIpreprv": Frequency drop over 50%% detected, reinitialising", proc->proc_repr); bitfury->force_reinit = true; } } if ((int)bitfury->mhz > bitfury->mhz_best && bitfury->mhz_last > bitfury->mhz_best) { // mhz_best is the lowest of two sequential readings over the previous best if ((int)bitfury->mhz > bitfury->mhz_last) bitfury->mhz_best = bitfury->mhz_last; else bitfury->mhz_best = bitfury->mhz; } bitfury->mhz_last = bitfury->mhz; bitfury->counter1 = counter; copy_time(&(bitfury->timer1), &tv_now); } } if (tvp_stat->tv_sec == 0 && tvp_stat->tv_usec == 0) { copy_time(tvp_stat, &tv_now); } if (c->osc6_max) { if (timer_elapsed(tvp_stat, &tv_now) >= 60) { double mh_diff, s_diff; const int osc = bitfury->osc6_bits; // Copy current statistics mh_diff = bitfury->counter2 - c->omh; s_diff = total_secs - c->os; applog(LOG_DEBUG, "%"PRIpreprv": %.0f completed in %f seconds", proc->proc_repr, mh_diff, s_diff); if (osc >= c->osc6_min && osc <= c->osc6_max) { c->mh[osc] += mh_diff; c->s[osc] += s_diff; } c->omh = bitfury->counter2; c->os = total_secs; if (opt_debug && !c->best_done) { char logbuf[0x100]; logbuf[0] = '\0'; for (i = c->osc6_min; i <= c->osc6_max; ++i) tailsprintf(logbuf, sizeof(logbuf), " %d=%.3f/%3.0fs", i, c->mh[i] / c->s[i], c->s[i]); applog(LOG_DEBUG, "%"PRIpreprv":%s", proc->proc_repr, logbuf); } // Change freq; if (!c->best_done) { bitfury_select_freq(bitfury, proc); } else { applog(LOG_DEBUG, "%"PRIpreprv": Stable freq, osc6_bits: %d", proc->proc_repr, bitfury->osc6_bits); } } } if (n) { for (i = 0; i < n; ++i) { nonce = bitfury_decnonce(newbuf[i]); if (fudge_nonce(thr->work, &nonce)) { applog(LOG_DEBUG, "%"PRIpreprv": nonce %x = %08lx (work=%p)", proc->proc_repr, i, (unsigned long)nonce, thr->work); submit_nonce(thr, thr->work, nonce); bitfury->counter2 += 1; } else if (fudge_nonce(thr->prev_work, &nonce)) { applog(LOG_DEBUG, "%"PRIpreprv": nonce %x = %08lx (prev work=%p)", proc->proc_repr, i, (unsigned long)nonce, thr->prev_work); submit_nonce(thr, thr->prev_work, nonce); bitfury->counter2 += 1; } else { inc_hw_errors(thr, thr->work, nonce); ++bitfury->sample_hwe; bitfury->strange_counter += 1; } if (++bitfury->sample_tot >= 0x40 || bitfury->sample_hwe >= 8) { if (bitfury->sample_hwe >= 8) { applog(LOG_WARNING, "%"PRIpreprv": %d of the last %d results were bad, reinitialising", proc->proc_repr, bitfury->sample_hwe, bitfury->sample_tot); bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury->desync_counter = 99; } bitfury->sample_tot = bitfury->sample_hwe = 0; } } bitfury->active = (bitfury->active + n) % 0x10; } 
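		/* Save the responses just processed, rotated by the n new results (a plain
		 * copy when n is 0), so oldbuf stays aligned with the updated 'active'
		 * index on the next poll. */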
memcpy(&oldbuf[0], &newbuf[n], 4 * (0x10 - n)); memcpy(&oldbuf[0x10 - n], &newbuf[0], 4 * n); bitfury->oldjob = newjob; out: if (unlikely(bitfury->force_reinit)) { applog(LOG_DEBUG, "%"PRIpreprv": Forcing reinitialisation", proc->proc_repr); bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury->desync_counter = 99; bitfury->mhz_last = 0; bitfury->mhz_best = 0; bitfury->force_reinit = false; } if (timer_elapsed(tvp_stat, &tv_now) >= 60) copy_time(tvp_stat, &tv_now); } timer_set_delay(&master_thr->tv_poll, &tv_now, 10000); } int64_t bitfury_job_process_results(struct thr_info *thr, struct work *work, bool stopping) { // Bitfury chips process only 768/1024 of the nonce range return 0xbd000000; } struct api_data *bitfury_api_device_detail(struct cgpu_info * const cgpu) { struct bitfury_device * const bitfury = cgpu->device_data; struct api_data *root = NULL; root = api_add_uint(root, "fasync", &bitfury->fasync, false); return root; } struct api_data *bitfury_api_device_status(struct cgpu_info * const cgpu) { struct bitfury_device * const bitfury = cgpu->device_data; struct api_data *root = NULL; int clock_bits = bitfury->osc6_bits; root = api_add_int(root, "Clock Bits", &clock_bits, true); root = api_add_freq(root, "Frequency", &bitfury->mhz, false); return root; } static bool _bitfury_set_device_parse_setting(uint32_t * const rv, char * const setting, char * const replybuf, const int maxval) { char *p; long int nv; if (!setting || !*setting) { sprintf(replybuf, "missing setting"); return false; } nv = strtol(setting, &p, 0); if (nv > maxval || nv < 1) { sprintf(replybuf, "invalid setting"); return false; } *rv = nv; return true; } static char *bitfury_spi_port_config(struct cgpu_info * const proc, char *option, char *setting, char *replybuf) { struct bitfury_device * const bitfury = proc->device_data; if (!strcasecmp(option, "baud")) { if (!_bitfury_set_device_parse_setting(&bitfury->spi->speed, setting, replybuf, INT_MAX)) return replybuf; return NULL; } return ""; } char *bitfury_set_device(struct cgpu_info * const proc, char * const option, char * const setting, char * const replybuf) { struct bitfury_device * const bitfury = proc->device_data; char *rv; uint32_t newval; if (!strcasecmp(option, "help")) { sprintf(replybuf, "baud: SPI baud rate\nosc6_bits: range 1-%d (slow to fast)", BITFURY_MAX_OSC6_BITS); return replybuf; } rv = bitfury_spi_port_config(proc, option, setting, replybuf); if ((!rv) || rv[0]) return rv; if (!strcasecmp(option, "osc6_bits")) { struct freq_stat * const c = &bitfury->chip_stat; newval = bitfury->osc6_bits; if (!_bitfury_set_device_parse_setting(&newval, setting, replybuf, BITFURY_MAX_OSC6_BITS)) return replybuf; bitfury->osc6_bits = newval; bitfury->force_reinit = true; c->osc6_max = 0; return NULL; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } #ifdef HAVE_CURSES void bitfury_tui_wlogprint_choices(struct cgpu_info *cgpu) { wlogprint("[O]scillator bits "); } const char *bitfury_tui_handle_choice(struct cgpu_info *cgpu, int input) { struct bitfury_device * const bitfury = cgpu->device_data; char buf[0x100]; switch (input) { case 'o': case 'O': { struct freq_stat * const c = &bitfury->chip_stat; int val; char *intvar; sprintf(buf, "Set oscillator bits (range 1-%d; slow to fast)", BITFURY_MAX_OSC6_BITS); intvar = curses_input(buf); if (!intvar) return "Invalid oscillator bits\n"; val = atoi(intvar); free(intvar); if (val < 1 || val > BITFURY_MAX_OSC6_BITS) return "Invalid oscillator bits\n"; 
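		// Input validated: apply the new clock, flag the chip for reinitialisation
		// from the I/O loop, and clear osc6_max, disabling any automatic frequency
		// calibration (gated on it in bitfury_do_io) that would override the choice.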
bitfury->osc6_bits = val; bitfury->force_reinit = true; c->osc6_max = 0; return "Oscillator bits changing\n"; } } return NULL; } void bitfury_wlogprint_status(struct cgpu_info *cgpu) { struct bitfury_device * const bitfury = cgpu->device_data; wlogprint("Oscillator bits: %d\n", bitfury->osc6_bits); } #endif struct device_drv bitfury_drv = { .dname = "bitfury_gpio", .name = "BFY", .drv_detect = bitfury_detect, .thread_init = bitfury_init, .thread_disable = bitfury_disable, .thread_enable = bitfury_enable, .thread_shutdown = bitfury_shutdown, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = bitfury_do_io, .job_process_results = bitfury_job_process_results, .get_api_extra_device_detail = bitfury_api_device_detail, .get_api_extra_device_status = bitfury_api_device_status, .set_device = bitfury_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-bitfury.h000066400000000000000000000023401226556647300203200ustar00rootroot00000000000000#ifndef BFG_DRIVER_BITFURY_H #define BFG_DRIVER_BITFURY_H #include #include #include "miner.h" #define BITFURY_MAX_OSC6_BITS 60 extern bool bitfury_prepare(struct thr_info *); extern bool bitfury_init_chip(struct cgpu_info *); extern void bitfury_init_freq_stat(struct freq_stat *, int osc6_min, int osc6_max); extern void bitfury_clean_freq_stat(struct freq_stat *); extern bool bitfury_job_prepare(struct thr_info *, struct work *, uint64_t max_nonce); extern void bitfury_noop_job_start(struct thr_info *); extern void bitfury_do_io(struct thr_info *); extern int64_t bitfury_job_process_results(struct thr_info *, struct work *, bool stopping); extern struct api_data *bitfury_api_device_detail(struct cgpu_info *); extern struct api_data *bitfury_api_device_status(struct cgpu_info *); extern char *bitfury_set_device(struct cgpu_info *, char *, char *, char *); extern void bitfury_tui_wlogprint_choices(struct cgpu_info *); extern const char *bitfury_tui_handle_choice(struct cgpu_info *, int input); extern void bitfury_wlogprint_status(struct cgpu_info *); extern void bitfury_disable(struct thr_info *); extern void bitfury_enable(struct thr_info *); extern void bitfury_shutdown(struct thr_info *); #endif bfgminer-bfgminer-3.10.0/driver-cairnsmore.c000066400000000000000000000136631226556647300210030ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include "compat.h" #include "dynclock.h" #include "icarus-common.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "miner.h" #define CAIRNSMORE1_IO_SPEED 115200 // This is a general ballpark #define CAIRNSMORE1_HASH_TIME 0.0000000024484 #define CAIRNSMORE1_MINIMUM_CLOCK 50 #define CAIRNSMORE1_DEFAULT_CLOCK 200 #define CAIRNSMORE1_MAXIMUM_CLOCK 210 BFG_REGISTER_DRIVER(cairnsmore_drv) static bool cairnsmore_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "Cairnsmore1"); } static bool cairnsmore_detect_one(const char *devpath) { struct ICARUS_INFO *info = calloc(1, sizeof(struct ICARUS_INFO)); if (unlikely(!info)) quit(1, "Failed to malloc ICARUS_INFO"); info->baud = CAIRNSMORE1_IO_SPEED; info->work_division = 2; info->fpga_count = 2; info->quirk_reopen = 0; info->Hs = CAIRNSMORE1_HASH_TIME; info->timing_mode = MODE_LONG; info->do_icarus_timing = true; if (!icarus_detect_custom(devpath, &cairnsmore_drv, info)) { free(info); return false; } return true; } static bool cairnsmore_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, cairnsmore_detect_one); } static bool cairnsmore_send_cmd(int fd, uint8_t cmd, uint8_t data, bool probe) { unsigned char pkt[64] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "vdi\xb7" "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" "bfg0" "\xff\xff\xff\xff" "\xb5\0\0\0"; if (unlikely(probe)) pkt[61] = '\x01'; pkt[32] = 0xda ^ cmd ^ data; pkt[33] = data; pkt[34] = cmd; return write(fd, pkt, sizeof(pkt)) == sizeof(pkt); } bool cairnsmore_supports_dynclock(int fd) { if (!cairnsmore_send_cmd(fd, 0, 1, true)) return false; if (!cairnsmore_send_cmd(fd, 0, 1, true)) return false; uint32_t nonce = 0; { struct timeval tv_finish; struct thr_info dummy = { .work_restart = false, .work_restart_notifier = {-1, -1}, }; icarus_gets((unsigned char*)&nonce, fd, &tv_finish, &dummy, 1, ICARUS_DEFAULT_READ_SIZE); } applog(LOG_DEBUG, "Cairnsmore dynclock detection... 
Got %08x", nonce); switch (nonce) { case 0x00949a6f: // big endian case 0x6f9a9400: // little endian // Hashed the command, so it's not supported return false; default: applog(LOG_WARNING, "Unexpected nonce from dynclock probe: %08x", (uint32_t)be32toh(nonce)); return false; case 0: return true; } } #define cairnsmore_send_cmd(fd, cmd, data) cairnsmore_send_cmd(fd, cmd, data, false) static bool cairnsmore_change_clock_func(struct thr_info *thr, int bestM) { struct cgpu_info *cm1 = thr->cgpu; struct ICARUS_INFO *info = cm1->device_data; if (unlikely(!cairnsmore_send_cmd(cm1->device_fd, 0, bestM))) return false; // Adjust Hs expectations for frequency change info->Hs = info->Hs * (double)bestM / (double)info->dclk.freqM; dclk_msg_freqchange(cm1->proc_repr, 2.5 * (double)info->dclk.freqM, 2.5 * (double)bestM, NULL); info->dclk.freqM = bestM; return true; } static bool cairnsmore_init(struct thr_info *thr) { struct cgpu_info *cm1 = thr->cgpu; struct ICARUS_INFO *info = cm1->device_data; struct icarus_state *state = thr->cgpu_data; if (cairnsmore_supports_dynclock(cm1->device_fd)) { info->dclk_change_clock_func = cairnsmore_change_clock_func; dclk_prepare(&info->dclk); info->dclk.freqMinM = CAIRNSMORE1_MINIMUM_CLOCK / 2.5; info->dclk.freqMaxM = CAIRNSMORE1_MAXIMUM_CLOCK / 2.5; info->dclk.freqM = info->dclk.freqMDefault = CAIRNSMORE1_DEFAULT_CLOCK / 2.5; cairnsmore_send_cmd(cm1->device_fd, 0, info->dclk.freqM); applog(LOG_WARNING, "%"PRIpreprv": Frequency set to %u MHz (range: %u-%u)", cm1->proc_repr, CAIRNSMORE1_DEFAULT_CLOCK, CAIRNSMORE1_MINIMUM_CLOCK, CAIRNSMORE1_MAXIMUM_CLOCK ); // The dynamic-clocking firmware connects each FPGA as its own device if (!(info->user_set & 1)) { info->work_division = 1; if (!(info->user_set & 2)) info->fpga_count = 1; } } else { applog(LOG_WARNING, "%"PRIpreprv": Frequency scaling not supported", cm1->proc_repr ); } // Commands corrupt the hash state, so next scanhash is a firstrun state->firstrun = true; return true; } void convert_icarus_to_cairnsmore(struct cgpu_info *cm1) { struct ICARUS_INFO *info = cm1->device_data; info->Hs = CAIRNSMORE1_HASH_TIME; info->fullnonce = info->Hs * (((double)0xffffffff) + 1); info->timing_mode = MODE_LONG; info->do_icarus_timing = true; cm1->drv = &cairnsmore_drv; renumber_cgpu(cm1); cairnsmore_init(cm1->thr[0]); } static struct api_data *cairnsmore_drv_extra_device_status(struct cgpu_info *cm1) { struct ICARUS_INFO *info = cm1->device_data; struct api_data*root = NULL; if (info->dclk.freqM) { double frequency = 2.5 * info->dclk.freqM; root = api_add_freq(root, "Frequency", &frequency, true); } return root; } static bool cairnsmore_identify(struct cgpu_info *cm1) { struct ICARUS_INFO *info = cm1->device_data; if (!info->dclk.freqM) return false; cairnsmore_send_cmd(cm1->device_fd, 1, 1); cgsleep_ms(5000); cairnsmore_send_cmd(cm1->device_fd, 1, 0); cm1->flash_led = true; return true; } static void cairnsmore_drv_init() { cairnsmore_drv = icarus_drv; cairnsmore_drv.dname = "cairnsmore"; cairnsmore_drv.name = "ECM"; cairnsmore_drv.lowl_match = cairnsmore_lowl_match; cairnsmore_drv.lowl_probe = cairnsmore_lowl_probe; cairnsmore_drv.thread_init = cairnsmore_init; cairnsmore_drv.identify_device = cairnsmore_identify; cairnsmore_drv.get_api_extra_device_status = cairnsmore_drv_extra_device_status; ++cairnsmore_drv.probe_priority; } struct device_drv cairnsmore_drv = { .drv_init = cairnsmore_drv_init, }; bfgminer-bfgminer-3.10.0/driver-cpu.c000066400000000000000000000522061226556647300174240ustar00rootroot00000000000000/* * Copyright 
2011-2012 Con Kolivas * Copyright 2011-2013 Luke Dashjr * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #include #endif #include #include "compat.h" #include "deviceapi.h" #include "miner.h" #include "bench_block.h" #include "logging.h" #include "util.h" #include "driver-cpu.h" #if defined(unix) #include #include #endif BFG_REGISTER_DRIVER(cpu_drv) #if defined(__linux) && defined(CPU_ZERO) /* Linux specific policy and affinity management */ #include static inline void drop_policy(void) { struct sched_param param; #ifdef SCHED_BATCH #ifdef SCHED_IDLE if (unlikely(sched_setscheduler(0, SCHED_IDLE, ¶m) == -1)) #endif sched_setscheduler(0, SCHED_BATCH, ¶m); #endif } static inline void affine_to_cpu(int id, int cpu) { cpu_set_t set; CPU_ZERO(&set); CPU_SET(cpu, &set); sched_setaffinity(0, sizeof(&set), &set); applog(LOG_INFO, "Binding cpu mining thread %d to cpu %d", id, cpu); } #else static inline void drop_policy(void) { } static inline void affine_to_cpu(int __maybe_unused id, int __maybe_unused cpu) { } #endif /* TODO: resolve externals */ extern char *set_int_range(const char *arg, int *i, int min, int max); extern int dev_from_id(int thr_id); /* chipset-optimized hash functions */ extern bool ScanHash_4WaySSE2(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool ScanHash_altivec_4way(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool scanhash_via(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n); extern bool scanhash_c(struct thr_info*, const unsigned char *midstate, unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n); extern bool scanhash_cryptopp(struct thr_info*, const unsigned char *midstate,unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n); extern bool scanhash_asm32(struct thr_info*, const unsigned char *midstate,unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool scanhash_sse2_64(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool scanhash_sse4_64(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool scanhash_sse2_32(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char 
*phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool scanhash_scrypt(struct thr_info *thr, int thr_id, unsigned char *pdata, unsigned char *scratchbuf, const unsigned char *ptarget, uint32_t max_nonce, unsigned long *hashes_done); #ifdef WANT_CPUMINE static size_t max_name_len = 0; static char *name_spaces_pad = NULL; const char *algo_names[] = { [ALGO_C] = "c", #ifdef WANT_SSE2_4WAY [ALGO_4WAY] = "4way", #endif #ifdef WANT_VIA_PADLOCK [ALGO_VIA] = "via", #endif [ALGO_CRYPTOPP] = "cryptopp", #ifdef WANT_CRYPTOPP_ASM32 [ALGO_CRYPTOPP_ASM32] = "cryptopp_asm32", #endif #ifdef WANT_X8632_SSE2 [ALGO_SSE2_32] = "sse2_32", #endif #ifdef WANT_X8664_SSE2 [ALGO_SSE2_64] = "sse2_64", #endif #ifdef WANT_X8664_SSE4 [ALGO_SSE4_64] = "sse4_64", #endif #ifdef WANT_ALTIVEC_4WAY [ALGO_ALTIVEC_4WAY] = "altivec_4way", #endif #ifdef WANT_SCRYPT [ALGO_SCRYPT] = "scrypt", #endif [ALGO_FASTAUTO] = "fastauto", [ALGO_AUTO] = "auto", }; static const sha256_func sha256_funcs[] = { [ALGO_C] = (sha256_func)scanhash_c, #ifdef WANT_SSE2_4WAY [ALGO_4WAY] = (sha256_func)ScanHash_4WaySSE2, #endif #ifdef WANT_ALTIVEC_4WAY [ALGO_ALTIVEC_4WAY] = (sha256_func) ScanHash_altivec_4way, #endif #ifdef WANT_VIA_PADLOCK [ALGO_VIA] = (sha256_func)scanhash_via, #endif [ALGO_CRYPTOPP] = (sha256_func)scanhash_cryptopp, #ifdef WANT_CRYPTOPP_ASM32 [ALGO_CRYPTOPP_ASM32] = (sha256_func)scanhash_asm32, #endif #ifdef WANT_X8632_SSE2 [ALGO_SSE2_32] = (sha256_func)scanhash_sse2_32, #endif #ifdef WANT_X8664_SSE2 [ALGO_SSE2_64] = (sha256_func)scanhash_sse2_64, #endif #ifdef WANT_X8664_SSE4 [ALGO_SSE4_64] = (sha256_func)scanhash_sse4_64, #endif #ifdef WANT_SCRYPT [ALGO_SCRYPT] = (sha256_func)scanhash_scrypt #endif }; #endif #ifdef WANT_CPUMINE enum sha256_algos opt_algo = ALGO_FASTAUTO; bool opt_usecpu = false; static bool forced_n_threads; #endif static const uint32_t hash1_init[] = { 0,0,0,0,0,0,0,0, 0x80000000, 0,0,0,0,0,0, 0x100, }; #ifdef WANT_CPUMINE // Algo benchmark, crash-prone, system independent stage double bench_algo_stage3( enum sha256_algos algo ) { // Use a random work block pulled from a pool static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK }; struct work work __attribute__((aligned(128))); unsigned char hash1[64]; size_t bench_size = sizeof(work); size_t work_size = sizeof(bench_block); size_t min_size = (work_size < bench_size ? work_size : bench_size); memset(&work, 0, sizeof(work)); memcpy(&work, &bench_block, min_size); static struct thr_info dummy; struct timeval end; struct timeval start; uint32_t max_nonce = opt_algo == ALGO_FASTAUTO ? 
(1<<8) : (1<<22); uint32_t last_nonce = 0; memcpy(&hash1[0], &hash1_init[0], sizeof(hash1)); timer_set_now(&start); { sha256_func func = sha256_funcs[algo]; (*func)( &dummy, work.midstate, work.data, hash1, work.hash, work.target, max_nonce, &last_nonce, work.blk.nonce ); } timer_set_now(&end); uint64_t usec_end = ((uint64_t)end.tv_sec)*1000*1000 + end.tv_usec; uint64_t usec_start = ((uint64_t)start.tv_sec)*1000*1000 + start.tv_usec; uint64_t usec_elapsed = usec_end - usec_start; double rate = -1.0; if (0drv = &cpu_drv; cgpu->devtype = "CPU"; cgpu->deven = DEV_ENABLED; cgpu->threads = 1; cgpu->kname = algo_names[opt_algo]; add_cgpu(cgpu); } return opt_n_threads; } static void cpu_detect() { noserial_detect_manual(&cpu_drv, cpu_autodetect); } static pthread_mutex_t cpualgo_lock; static bool cpu_thread_prepare(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; if (!(cgpu->device_id || thr->device_thread || cgpu->proc_id)) mutex_init(&cpualgo_lock); thread_reportin(thr); return true; } static uint64_t cpu_can_limit_work(struct thr_info __maybe_unused *thr) { return 0xffff; } static bool cpu_thread_init(struct thr_info *thr) { const int thr_id = thr->id; struct cgpu_info *cgpu = thr->cgpu; mutex_lock(&cpualgo_lock); switch (opt_algo) { case ALGO_AUTO: case ALGO_FASTAUTO: opt_algo = pick_fastest_algo(); default: break; } mutex_unlock(&cpualgo_lock); cgpu->kname = algo_names[opt_algo]; /* Set worker threads to nice 19 and then preferentially to SCHED_IDLE * and if that fails, then SCHED_BATCH. No need for this to be an * error if it fails */ setpriority(PRIO_PROCESS, 0, 19); drop_policy(); /* Cpu affinity only makes sense if the number of threads is a multiple * of the number of CPUs */ if (!(opt_n_threads % num_processors)) affine_to_cpu(dev_from_id(thr_id), dev_from_id(thr_id) % num_processors); return true; } static int64_t cpu_scanhash(struct thr_info *thr, struct work *work, int64_t max_nonce) { unsigned char hash1[64]; uint32_t first_nonce = work->blk.nonce; uint32_t last_nonce; bool rc; memcpy(&hash1[0], &hash1_init[0], sizeof(hash1)); CPUSearch: last_nonce = first_nonce; rc = false; /* scan nonces for a proof-of-work hash */ { sha256_func func = sha256_funcs[opt_algo]; rc = (*func)( thr, work->midstate, work->data, hash1, work->hash, work->target, max_nonce, &last_nonce, work->blk.nonce ); } /* if nonce found, submit work */ if (unlikely(rc)) { applog(LOG_DEBUG, "%"PRIpreprv" found something?", thr->cgpu->proc_repr); submit_nonce(thr, work, le32toh(*(uint32_t*)&work->data[76])); work->blk.nonce = last_nonce + 1; goto CPUSearch; } else if (unlikely(last_nonce == first_nonce)) return 0; work->blk.nonce = last_nonce + 1; return last_nonce - first_nonce + 1; } struct device_drv cpu_drv = { .dname = "cpu", .name = "CPU", .probe_priority = 120, .supported_algos = POW_SHA256D | POW_SCRYPT, .drv_detect = cpu_detect, .thread_prepare = cpu_thread_prepare, .can_limit_work = cpu_can_limit_work, .thread_init = cpu_thread_init, .scanhash = cpu_scanhash, }; #endif bfgminer-bfgminer-3.10.0/driver-cpu.h000066400000000000000000000037251226556647300174330ustar00rootroot00000000000000/* * Copyright 2011-2013 Luke Dashjr * Copyright 2011-2012 Con Kolivas * Copyright 2011 Mark Crichton * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #ifndef __DEVICE_CPU_H__ #define __DEVICE_CPU_H__ #include "miner.h" #include "config.h" #include #ifndef OPT_SHOW_LEN #define OPT_SHOW_LEN 80 #endif #if defined(__i386__) && defined(HAVE_SSE2) #define WANT_SSE2_4WAY 1 #endif #ifdef __ALTIVEC__ #define WANT_ALTIVEC_4WAY 1 #endif #if defined(__i386__) && defined(HAVE_YASM) && defined(HAVE_SSE2) #define WANT_X8632_SSE2 1 #endif #ifdef __i386__ #define WANT_VIA_PADLOCK 1 #endif #if defined(__x86_64__) && defined(HAVE_YASM) #define WANT_X8664_SSE2 1 #endif #if defined(__x86_64__) && defined(HAVE_YASM) #define WANT_X8664_SSE4 1 #endif #ifdef USE_SCRYPT #define WANT_SCRYPT #endif enum sha256_algos { ALGO_C, /* plain C */ ALGO_4WAY, /* parallel SSE2 */ ALGO_VIA, /* VIA padlock */ ALGO_CRYPTOPP, /* Crypto++ (C) */ ALGO_CRYPTOPP_ASM32, /* Crypto++ 32-bit assembly */ ALGO_SSE2_32, /* SSE2 for x86_32 */ ALGO_SSE2_64, /* SSE2 for x86_64 */ ALGO_SSE4_64, /* SSE4 for x86_64 */ ALGO_ALTIVEC_4WAY, /* parallel Altivec */ ALGO_SCRYPT, /* scrypt */ ALGO_FASTAUTO, /* fast autodetect */ ALGO_AUTO /* autodetect */ }; extern const char *algo_names[]; extern bool opt_usecpu; extern struct device_drv cpu_drv; extern char *set_algo(const char *arg, enum sha256_algos *algo); extern void show_algo(char buf[OPT_SHOW_LEN], const enum sha256_algos *algo); extern char *force_nthreads_int(const char *arg, int *i); extern void init_max_name_len(); extern double bench_algo_stage3(enum sha256_algos algo); extern void set_scrypt_algo(enum sha256_algos *algo); #endif /* __DEVICE_CPU_H__ */ bfgminer-bfgminer-3.10.0/driver-drillbit.c000066400000000000000000000457511226556647300204510ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2013 Angus Gratton * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include #include #include #include #include "deviceapi.h" #include "logging.h" #include "lowlevel.h" #include "lowl-vcom.h" BFG_REGISTER_DRIVER(drillbit_drv) #define DRILLBIT_MIN_VERSION 2 #define DRILLBIT_MAX_VERSION 3 #define DRILLBIT_MAX_WORK_RESULTS 0x400 #define DRILLBIT_MAX_RESULT_NONCES 0x10 enum drillbit_capability { DBC_TEMP = 1, DBC_EXT_CLOCK = 2, }; enum drillbit_voltagecfg { DBV_650mV = 0, DBV_750mV = 2, DBV_850mV = 1, DBV_950mV = 3, }; struct drillbit_board { enum drillbit_voltagecfg core_voltage_cfg; unsigned clock_level; bool clock_div2; bool use_ext_clock; unsigned ext_clock_freq; bool need_reinit; bool trigger_identify; uint16_t caps; }; static bool drillbit_lowl_match(const struct lowlevel_device_info * const info) { if (!lowlevel_match_id(info, &lowl_vcom, 0, 0)) return false; return (info->manufacturer && strstr(info->manufacturer, "Drillbit")); } static bool drillbit_detect_one(const char * const devpath) { uint8_t buf[0x10]; const int fd = serial_open(devpath, 0, 1, true); if (fd == -1) applogr(false, LOG_DEBUG, "%s: %s: Failed to open", __func__, devpath); if (1 != write(fd, "I", 1)) { applog(LOG_DEBUG, "%s: %s: Error writing 'I'", __func__, devpath); err: serial_close(fd); return false; } if (sizeof(buf) != serial_read(fd, buf, sizeof(buf))) { applog(LOG_DEBUG, "%s: %s: Short read in response to 'I'", __func__, devpath); goto err; } serial_close(fd); const unsigned protover = buf[0]; const unsigned long serialno = (uint32_t)buf[9] | ((uint32_t)buf[0xa] << 8) | ((uint32_t)buf[0xb] << 16) | ((uint32_t)buf[0xc] << 24); char * const product = (void*)&buf[1]; buf[9] = '\0'; // Ensure it is null-terminated (clobbers serial, but we already parsed it) unsigned chips = buf[0xd]; uint16_t caps = (uint16_t)buf[0xe] | ((uint16_t)buf[0xf] << 8); if (!product[0]) applogr(false, LOG_DEBUG, "%s: %s: Null product name", __func__, devpath); if (!serialno) applogr(false, LOG_DEBUG, "%s: %s: Serial number is zero", __func__, devpath); if (!chips) applogr(false, LOG_DEBUG, "%s: %s: No chips found", __func__, devpath); int loglev = LOG_WARNING; if (!strcmp(product, "DRILLBIT")) { // Hack: first production firmwares all described themselves as DRILLBIT, so fill in the gaps if (chips == 1) strcpy(product, "Thumb"); else strcpy(product, "Eight"); } else if (chips == 8 && !strcmp(product, "Eight")) {} // Known device else if (chips == 1 && !strcmp(product, "Thumb")) {} // Known device else loglev = LOG_DEBUG; if (protover < DRILLBIT_MIN_VERSION || (loglev == LOG_DEBUG && protover > DRILLBIT_MAX_VERSION)) applogr(false, loglev, "%s: %s: Unknown device protocol version %u.", __func__, devpath, protover); if (protover > DRILLBIT_MAX_VERSION) applogr(false, loglev, "%s: %s: Device firmware uses newer Drillbit protocol %u. We only support up to %u. 
Find a newer BFGMiner!", __func__, devpath, protover, (unsigned)DRILLBIT_MAX_VERSION); if (protover == 2 && chips == 1) // Production firmware Thumbs don't set any capability bits, so fill in the EXT_CLOCK one caps |= DBC_EXT_CLOCK; char *serno = malloc(9); snprintf(serno, 9, "%08lx", serialno); if (chips > 0x100) { applog(LOG_WARNING, "%s: %s: %u chips reported, but driver only supports up to 256", __func__, devpath, chips); chips = 0x100; } struct cgpu_info * const cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &drillbit_drv, .device_path = strdup(devpath), .dev_product = strdup(product), .dev_serial = serno, .deven = DEV_ENABLED, .procs = chips, .threads = 1, .device_data = (void*)(intptr_t)caps, }; return add_cgpu(cgpu); } static bool drillbit_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, drillbit_detect_one); } static void drillbit_problem(struct cgpu_info * const dev) { struct thr_info * const master_thr = dev->thr[0]; if (dev->device_fd != -1) { serial_close(dev->device_fd); dev->device_fd = -1; } timer_set_delay_from_now(&master_thr->tv_poll, 5000000); } #define problem(...) do{ \ drillbit_problem(dev); \ applogr(__VA_ARGS__); \ }while(0) static bool drillbit_check_response(const char * const repr, const int fd, struct cgpu_info * const dev, const char expect) { uint8_t ack; if (1 != serial_read(fd, &ack, 1)) problem(false, LOG_ERR, "%s: Short read in response to '%c'", repr, expect); if (ack != expect) problem(false, LOG_ERR, "%s: Wrong response to '%c': %u", dev->dev_repr, expect, (unsigned)ack); return true; } static bool drillbit_reset(struct cgpu_info * const dev) { const int fd = dev->device_fd; if (unlikely(fd == -1)) return false; if (1 != write(fd, "R", 1)) problem(false, LOG_ERR, "%s: Error writing reset command", dev->dev_repr); return drillbit_check_response(dev->dev_repr, fd, dev, 'R'); } static bool drillbit_send_config(struct cgpu_info * const dev) { const int fd = dev->device_fd; if (unlikely(fd == -1)) return false; const struct drillbit_board * const board = dev->device_data; const uint8_t buf[7] = {'C', board->core_voltage_cfg, board->clock_level, (board->clock_div2 ? 1 : 0), (board->use_ext_clock ? 
1 : 0), board->ext_clock_freq}; if (sizeof(buf) != write(fd, buf, sizeof(buf))) problem(false, LOG_ERR, "%s: Error sending config", dev->dev_repr); return drillbit_check_response(dev->dev_repr, fd, dev, 'C'); } static bool drillbit_resend_jobs(struct cgpu_info *proc); static bool drillbit_reconfigure(struct cgpu_info * const dev, const bool reopen) { struct thr_info * const master_thr = dev->thr[0]; int fd = dev->device_fd; if (reopen || fd == -1) { if (fd != -1) serial_close(fd); dev->device_fd = fd = serial_open(dev->device_path, 0, 10, true); if (fd == -1) return false; } if (!(drillbit_reset(dev) && drillbit_send_config(dev))) { serial_close(fd); dev->device_fd = -1; return false; } for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc) drillbit_resend_jobs(proc); timer_set_delay_from_now(&master_thr->tv_poll, 10000); return true; } static bool drillbit_ensure_configured(struct cgpu_info * const dev) { if (dev->device_fd != -1) return true; return drillbit_reconfigure(dev, false); } static bool drillbit_init(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu; dev->device_fd = -1; struct drillbit_board * const board = malloc(sizeof(*board)); *board = (struct drillbit_board){ .core_voltage_cfg = DBV_850mV, .clock_level = 40, .clock_div2 = false, .use_ext_clock = false, .ext_clock_freq = 200, .caps = (intptr_t)dev->device_data, }; dev->device_data = board; drillbit_reconfigure(dev, false); return true; } static bool drillbit_job_prepare(struct thr_info * const thr, struct work * const work, __maybe_unused const uint64_t max_nonce) { struct cgpu_info * const proc = thr->cgpu; const int chipid = proc->proc_id; struct cgpu_info * const dev = proc->device; uint8_t buf[0x2f]; if (!drillbit_ensure_configured(dev)) return false; const int fd = dev->device_fd; buf[0] = 'W'; buf[1] = chipid; buf[2] = 0; // high bits of chipid memcpy(&buf[3], work->midstate, 0x20); memcpy(&buf[0x23], &work->data[0x40], 0xc); if (sizeof(buf) != write(fd, buf, sizeof(buf))) problem(false, LOG_ERR, "%"PRIpreprv": Error sending work %d", proc->proc_repr, work->id); if (!drillbit_check_response(proc->proc_repr, fd, dev, 'W')) problem(false, LOG_ERR, "%"PRIpreprv": Error queuing work %d", proc->proc_repr, work->id); applog(LOG_DEBUG, "%"PRIpreprv": Queued work %d", proc->proc_repr, work->id); work->blk.nonce = 0xffffffff; return true; } static bool drillbit_resend_jobs(struct cgpu_info * const proc) { struct thr_info * const thr = proc->thr[0]; bool rv = true; if (thr->work) if (!drillbit_job_prepare(thr, thr->work, 0)) { applog(LOG_WARNING, "%"PRIpreprv": Failed to resend %s work", proc->proc_repr, "current"); rv = false; } if (thr->next_work) { if (!drillbit_job_prepare(thr, thr->next_work, 0)) { applog(LOG_WARNING, "%"PRIpreprv": Failed to resend %s work", proc->proc_repr, "next"); rv = false; } if (!rv) { // Fake transition so we kinda recover eventually mt_job_transition(thr); job_start_complete(thr); timer_set_now(&thr->tv_morework); } } return rv; } static void drillbit_first_job_start(struct thr_info __maybe_unused * const thr) { struct cgpu_info * const proc = thr->cgpu; if (unlikely(!thr->work)) { applog(LOG_DEBUG, "%"PRIpreprv": No current work, assuming immediate start", proc->proc_repr); mt_job_transition(thr); job_start_complete(thr); timer_set_now(&thr->tv_morework); } } static int64_t drillbit_job_process_results(struct thr_info *thr, struct work *work, bool stopping) { return 0xbd000000; } static struct cgpu_info *drillbit_find_proc(struct cgpu_info * const 
dev, int chipid) { struct cgpu_info *proc = dev; for (int i = 0; i < chipid; ++i) { proc = proc->next_proc; if (unlikely(!proc)) return NULL; } return proc; } static bool bitfury_fudge_nonce2(struct work * const work, uint32_t * const nonce_p) { if (!work) return false; const uint32_t m7 = *((uint32_t *)&work->data[64]); const uint32_t ntime = *((uint32_t *)&work->data[68]); const uint32_t nbits = *((uint32_t *)&work->data[72]); return bitfury_fudge_nonce(work->midstate, m7, ntime, nbits, nonce_p); } static bool drillbit_get_work_results(struct cgpu_info * const dev) { const int fd = dev->device_fd; if (fd == -1) return false; uint8_t buf[4 + (4 * DRILLBIT_MAX_RESULT_NONCES)]; uint32_t total; int i, j; if (1 != write(fd, "E", 1)) problem(false, LOG_ERR, "%s: Error sending request for work results", dev->dev_repr); if (sizeof(total) != serial_read(fd, &total, sizeof(total))) problem(false, LOG_ERR, "%s: Short read in response to 'E'", dev->dev_repr); total = le32toh(total); if (total > DRILLBIT_MAX_WORK_RESULTS) problem(false, LOG_ERR, "%s: Impossible number of work results: %lu", dev->dev_repr, (unsigned long)total); for (i = 0; i < total; ++i) { if (sizeof(buf) != serial_read(fd, buf, sizeof(buf))) problem(false, LOG_ERR, "%s: Short read on work result %d", dev->dev_repr, i); const int chipid = buf[0]; struct cgpu_info * const proc = drillbit_find_proc(dev, chipid); if (unlikely(!proc)) { applog(LOG_ERR, "%s: Unknown chip id %d", dev->dev_repr, chipid); continue; } struct thr_info * const thr = proc->thr[0]; const bool is_idle = buf[3]; int nonces = buf[2]; if (nonces > DRILLBIT_MAX_RESULT_NONCES) { applog(LOG_ERR, "%"PRIpreprv": More than %d nonces claimed, impossible", proc->proc_repr, (int)DRILLBIT_MAX_RESULT_NONCES); nonces = DRILLBIT_MAX_RESULT_NONCES; } applog(LOG_DEBUG, "%"PRIpreprv": Handling completion of %d nonces.
is_idle=%d work=%p next_work=%p", proc->proc_repr, nonces, is_idle, thr->work, thr->next_work); const uint32_t *nonce_p = (void*)&buf[4]; for (j = 0; j < nonces; ++j, ++nonce_p) { uint32_t nonce = bitfury_decnonce(*nonce_p); if (bitfury_fudge_nonce2(thr->work, &nonce)) submit_nonce(thr, thr->work, nonce); else if (bitfury_fudge_nonce2(thr->next_work, &nonce)) { applog(LOG_DEBUG, "%"PRIpreprv": Result for next work, transitioning", proc->proc_repr); submit_nonce(thr, thr->next_work, nonce); mt_job_transition(thr); job_start_complete(thr); } else if (bitfury_fudge_nonce2(thr->prev_work, &nonce)) { applog(LOG_DEBUG, "%"PRIpreprv": Result for PREVIOUS work", proc->proc_repr); submit_nonce(thr, thr->prev_work, nonce); } else inc_hw_errors(thr, thr->work, nonce); } if (is_idle && thr->next_work) { applog(LOG_DEBUG, "%"PRIpreprv": Chip went idle without any results for next work", proc->proc_repr); mt_job_transition(thr); job_start_complete(thr); } if (!thr->next_work) timer_set_now(&thr->tv_morework); } return true; } static void drillbit_poll(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu; struct drillbit_board * const board = dev->device_data; if (!drillbit_ensure_configured(dev)) return; drillbit_get_work_results(dev); if (board->need_reinit) { applog(LOG_NOTICE, "%s: Reinitialisation needed for configuration changes", dev->dev_repr); drillbit_reconfigure(dev, false); board->need_reinit = false; } if (board->trigger_identify) { const int fd = dev->device_fd; applog(LOG_DEBUG, "%s: Sending identify command", dev->dev_repr); if (1 != write(fd, "L", 1)) applog(LOG_ERR, "%s: Error writing identify command", dev->dev_repr); drillbit_check_response(dev->dev_repr, fd, dev, 'L'); board->trigger_identify = false; } timer_set_delay_from_now(&master_thr->tv_poll, 10000); } static bool drillbit_identify(struct cgpu_info * const proc) { struct cgpu_info * const dev = proc->device; struct drillbit_board * const board = dev->device_data; board->trigger_identify = true; return true; } static bool drillbit_get_stats(struct cgpu_info * const dev) { if (dev != dev->device) return true; struct drillbit_board * const board = dev->device_data; if (!(board->caps & DBC_TEMP)) return true; const int fd = dev->device_fd; if (fd == -1) return false; if (1 != write(fd, "T", 1)) problem(false, LOG_ERR, "%s: Error requesting temperature", dev->dev_repr); uint8_t buf[2]; if (sizeof(buf) != serial_read(fd, buf, sizeof(buf))) problem(false, LOG_ERR, "%s: Short read in response to 'T'", dev->dev_repr); float temp = ((uint16_t)buf[0]) | ((uint16_t)buf[1] << 8); temp /= 10.; for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc) proc->temp = temp; return true; } static float drillbit_voltagecfg_volts(const enum drillbit_voltagecfg vcfg) { switch (vcfg) { case DBV_650mV: return 0.65; case DBV_750mV: return 0.75; case DBV_850mV: return 0.85; case DBV_950mV: return 0.95; } return 0; } static void drillbit_clockcfg_str(char * const buf, size_t bufsz, struct drillbit_board * const board) { if (board->use_ext_clock) snprintf(buf, bufsz, "%u", board->ext_clock_freq); else snprintf(buf, bufsz, "L%u", board->clock_level); if (board->clock_div2) tailsprintf(buf, bufsz, ":2"); } static struct api_data *drillbit_api_stats(struct cgpu_info * const proc) { struct cgpu_info * const dev = proc->device; struct drillbit_board * const board = dev->device_data; struct api_data *root = NULL; char buf[0x100]; drillbit_clockcfg_str(buf, sizeof(buf), board); root = api_add_string(root, "ClockCfg", buf, 
true); float volts = drillbit_voltagecfg_volts(board->core_voltage_cfg); root = api_add_volts(root, "Voltage", &volts, true); return root; } static char *drillbit_set_device(struct cgpu_info * const proc, char * const option, char *setting, char * const replybuf) { struct cgpu_info * const dev = proc->device; struct drillbit_board * const board = dev->device_data; if (!strcasecmp(option, "help")) { sprintf(replybuf, "voltage: 0.65, 0.75, 0.85, or 0.95 (volts)\n" "clock: %sL0-L63 for internal clock levels; append :2 to activate div2", (board->caps & DBC_EXT_CLOCK) ? "0-255 (MHz) using external clock (80-230 recommended), or " : "" ); return replybuf; } if (!strcasecmp(option, "voltage")) { // NOTE: Do not use replybuf in here without implementing it in drillbit_tui_handle_choice if (!setting || !*setting) return "Missing voltage setting"; const int val = atof(setting) * 1000; enum drillbit_voltagecfg vcfg; switch (val) { case 650: case 649: vcfg = DBV_650mV; break; case 750: case 749: vcfg = DBV_750mV; break; case 850: case 849: vcfg = DBV_850mV; break; case 950: case 949: vcfg = DBV_950mV; break; default: return "Invalid voltage value"; } board->core_voltage_cfg = vcfg; board->need_reinit = true; return NULL; } if (!strcasecmp(option, "clock")) { // NOTE: Do not use replybuf in here without implementing it in drillbit_tui_handle_choice const bool use_ext_clock = !(setting[0] == 'L'); char *end = &setting[use_ext_clock ? 0 : 1]; const unsigned num = strtol(end, &end, 0); const bool div2 = (end[0] == ':' && end[1] == '2'); // NOTE: board assignments are ordered such that it is safe to race if (use_ext_clock) { if (!(board->caps & DBC_EXT_CLOCK)) return "External clock not supported by this device"; if (num < 0 || num > 255) return "External clock frequency out of range (0-255)"; board->clock_div2 = div2; board->ext_clock_freq = num; board->use_ext_clock = true; } else { if (num < 0 || num > 63) return "Internal clock level out of range (0-63)"; board->clock_div2 = div2; board->clock_level = num; board->use_ext_clock = false; } board->need_reinit = true; return NULL; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } #ifdef HAVE_CURSES static void drillbit_tui_wlogprint_choices(struct cgpu_info * const proc) { wlogprint("[C]lock [V]oltage "); } static const char *drillbit_tui_handle_choice(struct cgpu_info * const proc, const int input) { char *val; switch (input) { case 'c': case 'C': val = curses_input("Set clock (80-230 MHz using external clock, or L0-L63 for internal clock levels; append :2 to activate div2"); return drillbit_set_device(proc, "clock", val, NULL) ?: "Requesting clock change"; case 'v': case 'V': val = curses_input("Set voltage (0.65, 0.75, 0.85, or 0.95)"); return drillbit_set_device(proc, "voltage", val, NULL) ?: "Requesting voltage change"; } return NULL; } static void drillbit_wlogprint_status(struct cgpu_info * const proc) { struct cgpu_info * const dev = proc->device; struct drillbit_board * const board = dev->device_data; char buf[0x100]; drillbit_clockcfg_str(buf, sizeof(buf), board); wlogprint("Clock: %s\n", buf); wlogprint("Voltage: %.2f\n", drillbit_voltagecfg_volts(board->core_voltage_cfg)); } #endif struct device_drv drillbit_drv = { .dname = "drillbit", .name = "DRB", .lowl_match = drillbit_lowl_match, .lowl_probe = drillbit_lowl_probe, .thread_init = drillbit_init, .minerloop = minerloop_async, .job_prepare = drillbit_job_prepare, .job_start = drillbit_first_job_start, .job_process_results = drillbit_job_process_results, .poll = 
drillbit_poll, .get_stats = drillbit_get_stats, .identify_device = drillbit_identify, .get_api_stats = drillbit_api_stats, .set_device = drillbit_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = drillbit_wlogprint_status, .proc_tui_wlogprint_choices = drillbit_tui_wlogprint_choices, .proc_tui_handle_choice = drillbit_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-erupter.c000066400000000000000000000056441226556647300203270ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include "miner.h" #include "icarus-common.h" #include "lowlevel.h" #include "lowl-vcom.h" #define ERUPTER_IO_SPEED 115200 #define ERUPTER_HASH_TIME 0.0000000029761 BFG_REGISTER_DRIVER(erupter_drv) BFG_REGISTER_DRIVER(erupter_drv_emerald) static bool _erupter_detect_one(const char *devpath, struct device_drv *drv) { struct ICARUS_INFO *info = calloc(1, sizeof(struct ICARUS_INFO)); if (unlikely(!info)) quit(1, "Failed to malloc ICARUS_INFO"); *info = (struct ICARUS_INFO){ .baud = ERUPTER_IO_SPEED, .Hs = ERUPTER_HASH_TIME, .timing_mode = MODE_DEFAULT, .continue_search = true, }; if (!icarus_detect_custom(devpath, drv, info)) { free(info); return false; } return true; } static bool erupter_emerald_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "Block", "Erupter", "Emerald"); } static bool erupter_emerald_detect_one(const char *devpath) { // For detection via BEE:* return _erupter_detect_one(devpath, &erupter_drv_emerald); } static bool erupter_emerald_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, erupter_emerald_detect_one); } static bool erupter_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_lowlproduct(info, &lowl_vcom, "Block", "Erupter"); } static bool erupter_detect_one(const char *devpath) { struct device_drv *drv = &erupter_drv; // For autodetection if (unlikely(detectone_meta_info.product && strstr(detectone_meta_info.product, "Emerald"))) drv = &erupter_drv_emerald; return _erupter_detect_one(devpath, drv); } static bool erupter_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, erupter_detect_one); } static bool erupter_identify(struct cgpu_info *erupter) { struct thr_info *thr = erupter->thr[0]; struct icarus_state *state = thr->cgpu_data; state->identify = true; return true; } static void erupter_drv_init() { erupter_drv = icarus_drv; erupter_drv.dname = "erupter"; erupter_drv.name = "BES"; erupter_drv.lowl_match = erupter_lowl_match; erupter_drv.lowl_probe = erupter_lowl_probe; erupter_drv.identify_device = erupter_identify; ++erupter_drv.probe_priority; erupter_drv_emerald = erupter_drv; erupter_drv_emerald.name = "BEE"; erupter_drv.lowl_match = erupter_emerald_lowl_match; erupter_drv.lowl_probe = erupter_emerald_lowl_probe; ++erupter_drv_emerald.probe_priority; } struct device_drv erupter_drv = { .drv_init = erupter_drv_init, }; struct device_drv erupter_drv_emerald = { .drv_init = erupter_drv_init, }; bfgminer-bfgminer-3.10.0/driver-getwork.c000066400000000000000000000132771226556647300203240ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you 
can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #ifdef WIN32 #include #endif #include #ifndef WIN32 #include #include #include #endif #include #include #include #include "deviceapi.h" #include "driver-proxy.h" #include "httpsrv.h" #include "miner.h" static void getwork_prepare_resp(struct MHD_Response *resp) { httpsrv_prepare_resp(resp); MHD_add_response_header(resp, MHD_HTTP_HEADER_CONTENT_TYPE, "application/json"); MHD_add_response_header(resp, "X-Mining-Extensions", "hashesdone"); } static struct MHD_Response *getwork_gen_error(int16_t errcode, const char *errmsg, const char *idstr, size_t idstr_sz) { size_t replysz = 0x40 + strlen(errmsg) + idstr_sz; char * const reply = malloc(replysz); replysz = snprintf(reply, replysz, "{\"result\":null,\"error\":{\"code\":%d,\"message\":\"%s\"},\"id\":%s}", errcode, errmsg, idstr ?: "0"); struct MHD_Response * const resp = MHD_create_response_from_buffer(replysz, reply, MHD_RESPMEM_MUST_FREE); getwork_prepare_resp(resp); return resp; } static int getwork_error(struct MHD_Connection *conn, int16_t errcode, const char *errmsg, const char *idstr, size_t idstr_sz) { struct MHD_Response * const resp = getwork_gen_error(errcode, errmsg, idstr, idstr_sz); const int ret = MHD_queue_response(conn, 500, resp); MHD_destroy_response(resp); return ret; } int handle_getwork(struct MHD_Connection *conn, bytes_t *upbuf) { struct proxy_client *client; struct MHD_Response *resp; char *user, *idstr = NULL; const char *submit = NULL; size_t idstr_sz = 1; struct cgpu_info *cgpu; struct thr_info *thr; json_t *json = NULL, *j2; json_error_t jerr; struct work *work; char *reply; const char *hashesdone = NULL; int ret; if (bytes_len(upbuf)) { bytes_nullterminate(upbuf); json = JSON_LOADS((char*)bytes_buf(upbuf), &jerr); if (!json) { ret = getwork_error(conn, -32700, "JSON parse error", idstr, idstr_sz); goto out; } j2 = json_object_get(json, "id"); if (j2) { idstr = json_dumps_ANY(j2, 0); idstr_sz = strlen(idstr); } if (strcmp("getwork", bfg_json_obj_string(json, "method", "getwork"))) { ret = getwork_error(conn, -32601, "Only getwork supported", idstr, idstr_sz); goto out; } j2 = json_object_get(json, "params"); submit = j2 ? 
__json_array_string(j2, 0) : NULL; } user = MHD_basic_auth_get_username_password(conn, NULL); if (!user) { resp = getwork_gen_error(-4096, "Please provide a username", idstr, idstr_sz); ret = MHD_queue_basic_auth_fail_response(conn, PACKAGE, resp); goto out; } client = proxy_find_or_create_client(user); free(user); if (!client) { ret = getwork_error(conn, -32603, "Failed creating new cgpu", idstr, idstr_sz); goto out; } cgpu = client->cgpu; thr = cgpu->thr[0]; hashesdone = MHD_lookup_connection_value(conn, MHD_HEADER_KIND, "X-Hashes-Done"); if (submit) { unsigned char hdr[80]; const char *rejreason; uint32_t nonce; // NOTE: expecting hex2bin to fail since we only parse 80 of the 128 hex2bin(hdr, submit, 80); nonce = le32toh(*(uint32_t *)&hdr[76]); HASH_FIND(hh, client->work, hdr, 76, work); if (!work) { inc_hw_errors2(thr, NULL, &nonce); rejreason = "unknown-work"; } else { if (!submit_nonce(thr, work, nonce)) rejreason = "H-not-zero"; else if (stale_work(work, true)) rejreason = "stale"; else rejreason = NULL; if (!hashesdone) hashesdone = "0x100000000"; } reply = malloc(36 + idstr_sz); const size_t replysz = sprintf(reply, "{\"error\":null,\"result\":%s,\"id\":%s}", rejreason ? "false" : "true", idstr); resp = MHD_create_response_from_buffer(replysz, reply, MHD_RESPMEM_MUST_FREE); getwork_prepare_resp(resp); MHD_add_response_header(resp, "X-Mining-Identifier", cgpu->proc_repr); if (rejreason) MHD_add_response_header(resp, "X-Reject-Reason", rejreason); ret = MHD_queue_response(conn, 200, resp); MHD_destroy_response(resp); goto out; } if (cgpu->deven == DEV_DISABLED) { resp = getwork_gen_error(-10, "Virtual device has been disabled", idstr, idstr_sz); MHD_add_response_header(resp, "X-Mining-Identifier", cgpu->proc_repr); ret = MHD_queue_response(conn, 500, resp); MHD_destroy_response(resp); goto out; } { const size_t replysz = 590 + idstr_sz; work = get_work(thr); reply = malloc(replysz); memcpy(reply, "{\"error\":null,\"result\":{\"target\":\"ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000\",\"data\":\"", 108); bin2hex(&reply[108], work->data, 128); memcpy(&reply[364], "\",\"midstate\":\"", 14); bin2hex(&reply[378], work->midstate, 32); memcpy(&reply[442], "\",\"hash1\":\"00000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000010000\"},\"id\":", 147); memcpy(&reply[589], idstr ?: "0", idstr_sz); memcpy(&reply[589 + idstr_sz], "}", 1); timer_set_now(&work->tv_work_start); HASH_ADD_KEYPTR(hh, client->work, work->data, 76, work); resp = MHD_create_response_from_buffer(replysz, reply, MHD_RESPMEM_MUST_FREE); getwork_prepare_resp(resp); MHD_add_response_header(resp, "X-Mining-Identifier", cgpu->proc_repr); ret = MHD_queue_response(conn, 200, resp); MHD_destroy_response(resp); } out: if (hashesdone) hashes_done2(thr, strtoll(hashesdone, NULL, 0), NULL); free(idstr); if (json) json_decref(json); return ret; } bfgminer-bfgminer-3.10.0/driver-hashbuster.c000066400000000000000000000174341226556647300210110ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2013 Vladimir Strinski * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "logging.h" #include "lowlevel.h" #include "lowl-hid.h" #include "miner.h" #define HASHBUSTER_USB_PRODUCT "HashBuster" #define HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER 62 BFG_REGISTER_DRIVER(hashbuster_drv) static bool hashbuster_io(hid_device * const h, void * const buf, const void * const cmd) { const uint8_t cmdbyte = *((uint8_t *)cmd); char x[0x81]; if (unlikely(opt_dev_protocol)) { bin2hex(x, cmd, 0x40); applog(LOG_DEBUG, "%s(%p): SEND: %s", __func__, h, x); } const bool rv = likely( 0x40 == hid_write(h, cmd, 0x40) && 0x40 == hid_read (h, buf, 0x40) && ((uint8_t *)buf)[0] == cmdbyte ); if (unlikely(opt_dev_protocol)) { bin2hex(x, buf, 0x40); applog(LOG_DEBUG, "%s(%p): RECV: %s", __func__, h, x); } return rv; } static bool hashbuster_spi_config(hid_device * const h, const uint8_t mode, const uint8_t miso, const uint32_t freq) { uint8_t buf[0x40] = {'\x01', '\x01', mode, miso}; switch (freq) { case 100000: buf[4] = '\0'; break; case 750000: buf[4] = '\x01'; break; case 3000000: buf[4] = '\x02'; break; case 12000000: buf[4] = '\x03'; break; default: return false; } if (!hashbuster_io(h, buf, buf)) return false; return (buf[1] == '\x0f'); } static bool hashbuster_spi_disable(hid_device * const h) { uint8_t buf[0x40] = {'\x01'}; if (!hashbuster_io(h, buf, buf)) return false; return (buf[1] == '\x0f'); } static bool hashbuster_spi_reset(hid_device * const h, uint8_t chips) { uint8_t buf[0x40] = {'\x02', chips}; if (!hashbuster_io(h, buf, buf)) return false; return (buf[1] == '\xff'); } static bool hashbuster_spi_transfer(hid_device * const h, void * const buf, const void * const data, size_t datasz) { if (datasz > HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER) return false; uint8_t cbuf[0x40] = {'\x03', datasz}; memcpy(&cbuf[2], data, datasz); if (!hashbuster_io(h, cbuf, cbuf)) return false; if (cbuf[1] != datasz) return false; memcpy(buf, &cbuf[2], datasz); return true; } static bool hashbuster_spi_txrx(struct spi_port * const port) { hid_device * const h = port->userp; const uint8_t *wrbuf = spi_gettxbuf(port); uint8_t *rdbuf = spi_getrxbuf(port); size_t bufsz = spi_getbufsz(port); hashbuster_spi_disable(h); hashbuster_spi_reset(h, 0x10); hashbuster_spi_config(h, port->mode, 0, port->speed); while (bufsz >= HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER) { if (!hashbuster_spi_transfer(h, rdbuf, wrbuf, HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER)) return false; rdbuf += HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; wrbuf += HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; bufsz -= HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; } if (bufsz > 0) { if (!hashbuster_spi_transfer(h, rdbuf, wrbuf, bufsz)) return false; } return true; } static bool hashbuster_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_lowlproduct(info, &lowl_hid, HASHBUSTER_USB_PRODUCT); } static int hashbuster_chip_count(hid_device *h) { /* Do not allocate spi_port on the stack! 
OS X, at least, has a 512 KB default stack size for secondary threads */ struct spi_port *spi = malloc(sizeof(*spi)); spi->txrx = hashbuster_spi_txrx; spi->userp = h; spi->repr = hashbuster_drv.dname; spi->logprio = LOG_DEBUG; spi->speed = 100000; spi->mode = 0; const int chip_count = libbitfury_detectChips1(spi); free(spi); return chip_count; } static bool hashbuster_lowl_probe(const struct lowlevel_device_info * const info) { const char * const product = info->product; const char * const serial = info->serial; char * const path = info->path; hid_device *h; uint8_t buf[0x40] = {'\xfe'}; if (info->lowl != &lowl_hid) applogr(false, LOG_DEBUG, "%s: Matched \"%s\" serial \"%s\", but lowlevel driver is not hid!", __func__, product, serial); if (info->vid != 0xFA04 || info->pid != 0x0011) applogr(false, LOG_DEBUG, "%s: Wrong VID/PID", __func__); h = hid_open_path(path); if (!h) applogr(false, LOG_WARNING, "%s: Failed to open HID path %s", __func__, path); if ((!hashbuster_io(h, buf, buf)) || buf[1] != 0x07) applogr(false, LOG_DEBUG, "%s: Identify sequence didn't match on %s", __func__, path); const int chip_n = hashbuster_chip_count(h); hid_close(h); if (lowlevel_claim(&hashbuster_drv, true, info)) return false; struct cgpu_info *cgpu; cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &hashbuster_drv, .device_data = lowlevel_ref(info), .threads = 1, .procs = chip_n, .device_path = strdup(info->path), .dev_manufacturer = maybe_strdup(info->manufacturer), .dev_product = maybe_strdup(product), .dev_serial = maybe_strdup(serial), .deven = DEV_ENABLED, }; return add_cgpu(cgpu); } static bool hashbuster_init(struct thr_info * const thr) { struct cgpu_info * const cgpu = thr->cgpu, *proc; struct bitfury_device *bitfury; struct spi_port *port; hid_device *h; h = hid_open_path(cgpu->device_path); lowlevel_devinfo_free(cgpu->device_data); if (!h) applogr(false, LOG_ERR, "%s: Failed to open hid device", cgpu->dev_repr); port = malloc(sizeof(*port)); if (!port) applogr(false, LOG_ERR, "%s: Failed to allocate spi_port", cgpu->dev_repr); /* Be careful, read spidevc.h comments for warnings */ memset(port, 0, sizeof(*port)); port->txrx = hashbuster_spi_txrx; port->userp = h; port->cgpu = cgpu; port->repr = cgpu->dev_repr; port->logprio = LOG_ERR; port->speed = 100000; port->mode = 0; for (proc = cgpu; proc; proc = proc->next_proc) { bitfury = malloc(sizeof(*bitfury)); if (!bitfury) { applog(LOG_ERR, "%"PRIpreprv": Failed to allocate bitfury_device", cgpu->proc_repr); proc->status = LIFE_DEAD2; continue; } *bitfury = (struct bitfury_device){ .spi = port, }; proc->device_data = bitfury; bitfury_init_chip(proc); bitfury->osc6_bits = 53; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_freq_stat(&bitfury->chip_stat, 52, 56); } timer_set_now(&thr->tv_poll); cgpu->status = LIFE_INIT2; return true; } static bool hashbuster_get_stats(struct cgpu_info * const cgpu) { struct cgpu_info *proc; if (cgpu != cgpu->device) return true; struct bitfury_device * const bitfury = cgpu->device_data; struct spi_port * const spi = bitfury->spi; hid_device * const h = spi->userp; uint8_t buf[0x40] = {'\x04'}; if (!hashbuster_io(h, buf, buf)) return false; if (buf[1]) { for (proc = cgpu; proc; proc = proc->next_proc) proc->temp = buf[1]; } return true; } struct device_drv hashbuster_drv = { .dname = "hashbuster", .name = "HBR", .lowl_match = hashbuster_lowl_match, .lowl_probe = hashbuster_lowl_probe, .thread_init = hashbuster_init, .thread_disable = bitfury_disable, 
.thread_enable = bitfury_enable, .thread_shutdown = bitfury_shutdown, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = bitfury_do_io, .job_process_results = bitfury_job_process_results, .get_stats = hashbuster_get_stats, .get_api_extra_device_detail = bitfury_api_device_detail, .get_api_extra_device_status = bitfury_api_device_status, .set_device = bitfury_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-hashbusterusb.c000066400000000000000000000367151226556647300215260ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2013 Vladimir Strinski * Copyright 2013 HashBuster team * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "logging.h" #include "lowlevel.h" #include "lowl-usb.h" #include "miner.h" #define HASHBUSTER_USB_PRODUCT "HashBuster" #define HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER 61 BFG_REGISTER_DRIVER(hashbusterusb_drv) struct hashbusterusb_state { uint16_t voltage; struct timeval identify_started; bool identify_requested; }; static bool hashbusterusb_io(struct lowl_usb_endpoint * const h, unsigned char *buf, unsigned char *cmd) { char x[0x81]; bool rv = true; if (unlikely(opt_dev_protocol)) { bin2hex(x, cmd, 0x40); applog(LOG_DEBUG, "%s(%p): SEND: %s", __func__, h, x); } do // Workaround for PIC USB buffer corruption. 
We should repeat last packet if receive FF { do { usb_write(h, cmd, 64); } while (usb_read(h, buf, 64) != 64); } while(buf[0]==0xFF); if (unlikely(opt_dev_protocol)) { bin2hex(x, buf, 0x40); applog(LOG_DEBUG, "%s(%p): RECV: %s", __func__, h, x); } return rv; } static bool hashbusterusb_spi_config(struct lowl_usb_endpoint * const h, const uint8_t mode, const uint8_t miso, const uint32_t freq) { uint8_t buf[0x40] = {'\x01', '\x01'}; if (!hashbusterusb_io(h, buf, buf)) return false; return (buf[1] == '\x00'); } static bool hashbusterusb_spi_disable(struct lowl_usb_endpoint * const h) { uint8_t buf[0x40] = {'\x01', '\x00'}; if (!hashbusterusb_io(h, buf, buf)) return false; return (buf[1] == '\x00'); } static bool hashbusterusb_spi_reset(struct lowl_usb_endpoint * const h, uint8_t chips) { uint8_t buf[0x40] = {'\x02', '\x00', chips}; if (!hashbusterusb_io(h, buf, buf)) return false; return (buf[1] == '\x00'); } static bool hashbusterusb_spi_transfer(struct lowl_usb_endpoint * const h, void * const buf, const void * const data, size_t datasz) { if (datasz > HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER) return false; uint8_t cbuf[0x40] = {'\x03', '\x00', datasz}; memcpy(&cbuf[3], data, datasz); if (!hashbusterusb_io(h, cbuf, cbuf)) return false; if (cbuf[2] != datasz) return false; memcpy(buf, &cbuf[3], datasz); return true; } static bool hashbusterusb_spi_txrx(struct spi_port * const port) { struct lowl_usb_endpoint * const h = port->userp; const uint8_t *wrbuf = spi_gettxbuf(port); uint8_t *rdbuf = spi_getrxbuf(port); size_t bufsz = spi_getbufsz(port); hashbusterusb_spi_disable(h); hashbusterusb_spi_reset(h, 0x10); hashbusterusb_spi_config(h, port->mode, 0, port->speed); while (bufsz >= HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER) { if (!hashbusterusb_spi_transfer(h, rdbuf, wrbuf, HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER)) return false; rdbuf += HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; wrbuf += HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; bufsz -= HASHBUSTER_MAX_BYTES_PER_SPI_TRANSFER; } if (bufsz > 0) { if (!hashbusterusb_spi_transfer(h, rdbuf, wrbuf, bufsz)) return false; } return true; } static bool hashbusterusb_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_id(info, &lowl_usb, 0xFA04, 0x000D); } static bool hashbusterusb_lowl_probe(const struct lowlevel_device_info * const info) { struct cgpu_info *cgpu = NULL; struct bitfury_device **devicelist, *bitfury; struct spi_port *port; int j; struct cgpu_info dummy_cgpu; const char * const product = info->product; char *serial = info->serial; libusb_device_handle *h; if (info->lowl != &lowl_usb) applogr(false, LOG_DEBUG, "%s: Matched \"%s\" %s, but lowlevel driver is not usb_generic!", __func__, product, info->devid); if (info->vid != 0xFA04 || info->pid != 0x000D) applogr(false, LOG_DEBUG, "%s: Wrong VID/PID", __func__); libusb_device *dev = info->lowl_data; if ( (j = libusb_open(dev, &h)) ) applogr(false, LOG_ERR, "%s: Failed to open %s: %s", __func__, info->devid, bfg_strerror(j, BST_LIBUSB)); if ( (j = libusb_set_configuration(h, 1)) ) applogr(false, LOG_ERR, "%s: Failed to set configuration 1 on %s: %s", __func__, info->devid, bfg_strerror(j, BST_LIBUSB)); if ( (j = libusb_claim_interface(h, 0)) ) applogr(false, LOG_ERR, "%s: Failed to claim interface 0 on %s: %s", __func__, info->devid, bfg_strerror(j, BST_LIBUSB)); struct lowl_usb_endpoint * const ep = usb_open_ep_pair(h, 0x81, 64, 0x01, 64); usb_ep_set_timeouts_ms(ep, 100, 0); unsigned char OUTPacket[64]; unsigned char INPacket[64]; OUTPacket[0] = 0xFE; hashbusterusb_io(ep, 
INPacket, OUTPacket); if (INPacket[1] == 0x18) { // Turn on miner PSU OUTPacket[0] = 0x10; OUTPacket[1] = 0x00; OUTPacket[2] = 0x01; hashbusterusb_io(ep, INPacket, OUTPacket); } OUTPacket[0] = '\x20'; hashbusterusb_io(ep, INPacket, OUTPacket); if (!memcmp(INPacket, "\x20\0", 2)) { // 64-bit BE serial number uint64_t sernum = 0; for (j = 0; j < 8; ++j) sernum |= (uint64_t)INPacket[j + 2] << (j * 8); serial = malloc((8 * 2) + 1); sprintf(serial, "%08"PRIX64, sernum); } else serial = maybe_strdup(info->serial); int chip_n; port = malloc(sizeof(*port)); port->cgpu = &dummy_cgpu; port->txrx = hashbusterusb_spi_txrx; port->userp = ep; port->repr = hashbusterusb_drv.dname; port->logprio = LOG_DEBUG; port->speed = 100000; port->mode = 0; chip_n = libbitfury_detectChips1(port); if (unlikely(!chip_n)) chip_n = libbitfury_detectChips1(port); if (unlikely(!chip_n)) { applog(LOG_WARNING, "%s: No chips found on %s (serial \"%s\")", __func__, info->devid, serial); fail: usb_close_ep(ep); free(port); free(serial); libusb_release_interface(h, 0); libusb_close(h); return false; } if (bfg_claim_libusb(&hashbusterusb_drv, true, dev)) goto fail; { devicelist = malloc(sizeof(*devicelist) * chip_n); for (j = 0; j < chip_n; ++j) { devicelist[j] = bitfury = malloc(sizeof(*bitfury)); *bitfury = (struct bitfury_device){ .spi = port, .slot = 0, .fasync = j, }; } cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &hashbusterusb_drv, .procs = chip_n, .device_data = devicelist, .cutofftemp = 200, .threads = 1, .device_path = strdup(info->devid), .dev_manufacturer = maybe_strdup(info->manufacturer), .dev_product = maybe_strdup(product), .dev_serial = serial, .deven = DEV_ENABLED, }; } return add_cgpu(cgpu); } static bool hashbusterusb_init(struct thr_info * const thr) { struct cgpu_info * const cgpu = thr->cgpu, *proc; struct bitfury_device **devicelist; struct bitfury_device *bitfury; struct hashbusterusb_state * const state = malloc(sizeof(*state)); *state = (struct hashbusterusb_state){ .voltage = 0, }; cgpu_setup_control_requests(cgpu); for (proc = thr->cgpu; proc; proc = proc->next_proc) { devicelist = proc->device_data; bitfury = devicelist[proc->proc_id]; proc->device_data = bitfury; proc->thr[0]->cgpu_data = state; bitfury->spi->cgpu = proc; bitfury_init_chip(proc); bitfury->osc6_bits = 53; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_freq_stat(&bitfury->chip_stat, 52, 56); if (proc->proc_id == proc->procs - 1) free(devicelist); } timer_set_now(&thr->tv_poll); cgpu->status = LIFE_INIT2; return true; } static void hashbusterusb_set_colour(struct cgpu_info *, uint8_t, uint8_t, uint8_t); static void hashbusterusb_poll(struct thr_info * const master_thr) { struct hashbusterusb_state * const state = master_thr->cgpu_data; struct cgpu_info * const cgpu = master_thr->cgpu; if (state->identify_requested) { if (!timer_isset(&state->identify_started)) hashbusterusb_set_colour(cgpu, 0xff, 0, 0xff); timer_set_delay_from_now(&state->identify_started, 5000000); state->identify_requested = false; } bitfury_do_io(master_thr); if (timer_passed(&state->identify_started, NULL)) { hashbusterusb_set_colour(cgpu, 0, 0x7e, 0); timer_unset(&state->identify_started); } } static bool hashbusterusb_get_stats(struct cgpu_info * const cgpu) { bool rv = false; struct cgpu_info *proc; if (cgpu != cgpu->device) return true; struct bitfury_device * const bitfury = cgpu->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; 
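	/* Poll board statistics over the bulk endpoint: opcode 0x04 returns the
	 * board temperature in byte 1 (0 means no reading), and opcode 0x15
	 * returns the VRM output voltage in millivolts as a little-endian
	 * 16-bit value in bytes 2-3 when byte 1 is zero. */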
uint8_t buf[0x40] = {'\x04'}; if (hashbusterusb_io(h, buf, buf)) { if (buf[1]) { rv = true; for (proc = cgpu; proc; proc = proc->next_proc) proc->temp = buf[1]; } } buf[0] = '\x15'; if (hashbusterusb_io(h, buf, buf)) { if (!memcmp(buf, "\x15\0", 2)) { rv = true; const uint16_t voltage = (buf[3] << 8) | buf[2]; for (proc = cgpu; proc; proc = proc->next_proc) { struct hashbusterusb_state * const state = proc->thr[0]->cgpu_data; state->voltage = voltage; } } } return rv; } static void hashbusterusb_shutdown(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct bitfury_device * const bitfury = cgpu->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; // Shutdown PSU unsigned char OUTPacket[64]; unsigned char INPacket[64]; OUTPacket[0] = 0x10; OUTPacket[1] = 0x00; OUTPacket[2] = 0x00; hashbusterusb_io(h, INPacket, OUTPacket); } static void hashbusterusb_set_colour(struct cgpu_info * const cgpu, const uint8_t red, const uint8_t green, const uint8_t blue) { struct bitfury_device * const bitfury = cgpu->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; uint8_t buf[0x40] = {'\x30', 0, red, green, blue}; hashbusterusb_io(h, buf, buf); applog(LOG_DEBUG, "%s: Set LED colour to r=0x%x g=0x%x b=0x%x", cgpu->dev_repr, (unsigned)red, (unsigned)green, (unsigned)blue); } static bool hashbusterusb_identify(struct cgpu_info * const proc) { struct hashbusterusb_state * const state = proc->thr[0]->cgpu_data; state->identify_requested = true; return true; } static bool hashbusterusb_set_voltage(struct cgpu_info * const proc, const uint16_t nv) { struct bitfury_device * const bitfury = proc->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; unsigned char buf[0x40] = {0x11, 0, (nv & 0xff), (nv >> 8)}; hashbusterusb_io(h, buf, buf); return !memcmp(buf, "\x11\0", 2); } static bool hashbusterusb_vrm_unlock(struct cgpu_info * const proc, const char * const code) { struct bitfury_device * const bitfury = proc->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; unsigned char buf[0x40] = {0x12}; size_t size; size = strlen(code) >> 1; if (size > 63) size = 63; hex2bin(&buf[1], code, size); hashbusterusb_io(h, buf, buf); return !memcmp(buf, "\x12\0", 2); } static void hashbusterusb_vrm_lock(struct cgpu_info * const proc) { struct bitfury_device * const bitfury = proc->device_data; struct spi_port * const spi = bitfury->spi; struct lowl_usb_endpoint * const h = spi->userp; unsigned char buf[0x40] = {0x14}; hashbusterusb_io(h, buf, buf); } static struct api_data *hashbusterusb_api_extra_device_stats(struct cgpu_info * const cgpu) { struct hashbusterusb_state * const state = cgpu->thr[0]->cgpu_data; struct api_data *root = bitfury_api_device_status(cgpu); float volts = state->voltage; volts /= 1000.; root = api_add_volts(root, "Voltage", &volts, true); return root; } static char *hashbusterusb_set_device(struct cgpu_info * const proc, char * const option, char * const setting, char * const replybuf) { if (!strcasecmp(option, "help")) { bitfury_set_device(proc, option, setting, replybuf); tailsprintf(replybuf, 1024, "\nvrmlock: Lock the VRM voltage to safe range\nvrmunlock: Allow setting potentially unsafe voltages (requires unlock code)\nvoltage: Set voltage"); return replybuf; } if (!strcasecmp(option, "vrmlock")) { cgpu_request_control(proc->device); hashbusterusb_vrm_lock(proc); 
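	/* opcode 0x14 (hashbusterusb_vrm_lock) restricts the VRM to its safe
	 * voltage range again; no reply payload is checked for this command. */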
cgpu_release_control(proc->device); return NULL; } if (!strcasecmp(option, "vrmunlock")) { cgpu_request_control(proc->device); const bool rv = hashbusterusb_vrm_unlock(proc, setting); cgpu_release_control(proc->device); if (!rv) return "Unlock error"; return NULL; } if (!strcasecmp(option, "voltage")) { const int val = atof(setting) * 1000; if (val < 600 || val > 1100) return "Invalid PSU voltage value"; cgpu_request_control(proc->device); const bool rv = hashbusterusb_set_voltage(proc, val); cgpu_release_control(proc->device); if (!rv) return "Voltage change error"; return NULL; } return bitfury_set_device(proc, option, setting, replybuf); } #ifdef HAVE_CURSES void hashbusterusb_tui_wlogprint_choices(struct cgpu_info * const proc) { wlogprint("[V]oltage "); wlogprint("[O]scillator bits "); //wlogprint("[F]an speed "); // To be implemented wlogprint("[U]nlock VRM "); wlogprint("[L]ock VRM "); } const char *hashbusterusb_tui_handle_choice(struct cgpu_info * const proc, const int input) { switch (input) { case 'v': case 'V': { const int val = curses_int("Set PSU voltage (range 600mV-1100mV. VRM unlock is required for over 870mV)"); if (val < 600 || val > 1100) return "Invalid PSU voltage value\n"; cgpu_request_control(proc->device); const bool rv = hashbusterusb_set_voltage(proc, val); cgpu_release_control(proc->device); if (!rv) return "Voltage change error\n"; return "Voltage change successful\n"; } case 'u': case 'U': { char *input = curses_input("VRM unlock code"); if (!input) input = calloc(1, 1); cgpu_request_control(proc->device); const bool rv = hashbusterusb_vrm_unlock(proc, input); cgpu_release_control(proc->device); free(input); if (!rv) return "Unlock error\n"; return "Unlocking PSU\n"; } case 'o': case 'O': return bitfury_tui_handle_choice(proc, input); case 'l': case 'L': { cgpu_request_control(proc->device); hashbusterusb_vrm_lock(proc); cgpu_release_control(proc->device); return "VRM lock\n"; } } return NULL; } void hashbusterusb_wlogprint_status(struct cgpu_info * const proc) { struct hashbusterusb_state * const state = proc->thr[0]->cgpu_data; bitfury_wlogprint_status(proc); wlogprint("PSU voltage: %umV\n", (unsigned)state->voltage); } #endif struct device_drv hashbusterusb_drv = { .dname = "hashbusterusb", .name = "HBR", .lowl_match = hashbusterusb_lowl_match, .lowl_probe = hashbusterusb_lowl_probe, .thread_init = hashbusterusb_init, .thread_disable = bitfury_disable, .thread_enable = bitfury_enable, .thread_shutdown = hashbusterusb_shutdown, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = hashbusterusb_poll, .job_process_results = bitfury_job_process_results, .get_stats = hashbusterusb_get_stats, .get_api_extra_device_detail = bitfury_api_device_detail, .get_api_extra_device_status = hashbusterusb_api_extra_device_stats, .set_device = hashbusterusb_set_device, .identify_device = hashbusterusb_identify, #ifdef HAVE_CURSES .proc_wlogprint_status = hashbusterusb_wlogprint_status, .proc_tui_wlogprint_choices = hashbusterusb_tui_wlogprint_choices, .proc_tui_handle_choice = hashbusterusb_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-hashfast.c000066400000000000000000000402131226556647300204310ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. 
See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include "deviceapi.h" #include "logging.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "util.h" BFG_REGISTER_DRIVER(hashfast_ums_drv) #define HASHFAST_QUEUE_MEMORY 0x20 #define HASHFAST_ALL_CHIPS 0xff #define HASHFAST_ALL_CORES 0xff #define HASHFAST_HEADER_SIZE 8 #define HASHFAST_MAX_DATA 0x3fc #define HASHFAST_HASH_SIZE (0x20 + 0xc + 4 + 4 + 2 + 1 + 1) enum hashfast_opcode { HFOP_NULL = 0, HFOP_ROOT = 1, HFOP_RESET = 2, HFOP_PLL_CONFIG = 3, HFOP_ADDRESS = 4, HFOP_READDRESS = 5, HFOP_HIGHEST = 6, HFOP_BAUD = 7, HFOP_UNROOT = 8, HFOP_HASH = 9, HFOP_NONCE = 0x0a, HFOP_ABORT = 0x0b, HFOP_STATUS = 0x0c, HFOP_GPIO = 0x0d, HFOP_CONFIG = 0x0e, HFOP_STATISTICS = 0x0f, HFOP_GROUP = 0x10, HFOP_CLOCKGATE = 0x11, HFOP_USB_INIT = 0x80, HFOP_GET_TRACE = 0x81, HFOP_LOOPBACK_USB = 0x82, HFOP_LOOPBACK_UART = 0x83, HFOP_DFU = 0x84, HFOP_USB_SHUTDOWN = 0x85, HFOP_DIE_STATUS = 0x86, HFOP_GWQ_STATUS = 0x87, HFOP_WORK_RESTART = 0x88, HFOP_USB_STATS1 = 0x89, HFOP_USB_GWQSTATS = 0x8a, HFOP_USB_NOTICE = 0x8b, HFOP_USB_DEBUG = 0xff, }; typedef unsigned long hashfast_isn_t; struct hashfast_parsed_msg { uint8_t opcode; uint8_t chipaddr; uint8_t coreaddr; uint16_t hdata; uint8_t data[HASHFAST_MAX_DATA]; size_t datalen; }; static ssize_t hashfast_write(const int fd, void * const buf, size_t bufsz) { const ssize_t rv = write(fd, buf, bufsz); if (opt_debug && opt_dev_protocol) { char hex[(bufsz * 2) + 1]; bin2hex(hex, buf, bufsz); if (rv < 0) applog(LOG_DEBUG, "%s fd=%d: SEND (%s) => %d", "hashfast", fd, hex, (int)rv); else if (rv < bufsz) applog(LOG_DEBUG, "%s fd=%d: SEND %.*s(%s)", "hashfast", fd, rv * 2, hex, &hex[rv * 2]); else if (rv > bufsz) applog(LOG_DEBUG, "%s fd=%d: SEND %s => +%d", "hashfast", fd, hex, (int)(rv - bufsz)); else applog(LOG_DEBUG, "%s fd=%d: SEND %s", "hashfast", fd, hex); } return rv; } static ssize_t hashfast_read(const int fd, void * const buf, size_t bufsz) { const ssize_t rv = serial_read(fd, buf, bufsz); if (opt_debug && opt_dev_protocol && rv) { char hex[(rv * 2) + 1]; bin2hex(hex, buf, rv); applog(LOG_DEBUG, "%s fd=%d: RECV %s", "hashfast", fd, hex); } return rv; } static bool hashfast_prepare_msg(uint8_t * const buf, const uint8_t opcode, const uint8_t chipaddr, const uint8_t coreaddr, const uint16_t hdata, const size_t datalen) { buf[0] = '\xaa'; buf[1] = opcode; buf[2] = chipaddr; buf[3] = coreaddr; buf[4] = hdata & 0xff; buf[5] = hdata >> 8; if (datalen > 1020 || datalen % 4) return false; buf[6] = datalen / 4; buf[7] = crc8ccitt(&buf[1], 6); return true; } static bool hashfast_send_msg(const int fd, uint8_t * const buf, const uint8_t opcode, const uint8_t chipaddr, const uint8_t coreaddr, const uint16_t hdata, const size_t datalen) { if (!hashfast_prepare_msg(buf, opcode, chipaddr, coreaddr, hdata, datalen)) return false; const size_t buflen = HASHFAST_HEADER_SIZE + datalen; return (buflen == hashfast_write(fd, buf, buflen)); } static bool hashfast_parse_msg(const int fd, struct hashfast_parsed_msg * const out_msg) { uint8_t buf[HASHFAST_HEADER_SIZE]; startover: if (HASHFAST_HEADER_SIZE != hashfast_read(fd, buf, HASHFAST_HEADER_SIZE)) return false; uint8_t *p = memchr(buf, '\xaa', HASHFAST_HEADER_SIZE); if (p != buf) { ignoresome: if (!p) goto startover; int moreneeded = p - buf; int alreadyhave = HASHFAST_HEADER_SIZE - moreneeded; memmove(buf, p, alreadyhave); if (moreneeded != hashfast_read(fd, &buf[alreadyhave], moreneeded)) return false; } const uint8_t 
correct_crc8 = crc8ccitt(&buf[1], 6); if (buf[7] != correct_crc8) { p = memchr(&buf[1], '\xaa', HASHFAST_HEADER_SIZE - 1); goto ignoresome; } out_msg->opcode = buf[1]; out_msg->chipaddr = buf[2]; out_msg->coreaddr = buf[3]; out_msg->hdata = (uint16_t)buf[4] | ((uint16_t)buf[5] << 8); out_msg->datalen = buf[6] * 4; return (out_msg->datalen == hashfast_read(fd, &out_msg->data[0], out_msg->datalen)); } static bool hashfast_lowl_match(const struct lowlevel_device_info * const info) { if (!lowlevel_match_id(info, &lowl_vcom, 0, 0)) return false; return (info->manufacturer && strstr(info->manufacturer, "HashFast")); } static bool hashfast_detect_one(const char * const devpath) { uint16_t clock = 550; uint8_t buf[HASHFAST_HEADER_SIZE]; const int fd = serial_open(devpath, 0, 100, true); if (fd == -1) { applog(LOG_DEBUG, "%s: Failed to open %s", __func__, devpath); return false; } struct hashfast_parsed_msg * const pmsg = malloc(sizeof(*pmsg)); hashfast_send_msg(fd, buf, HFOP_USB_INIT, 0, 0, clock, 0); do { if (!hashfast_parse_msg(fd, pmsg)) { applog(LOG_DEBUG, "%s: Failed to parse response on %s", __func__, devpath); serial_close(fd); goto err; } } while (pmsg->opcode != HFOP_USB_INIT); serial_close(fd); const int expectlen = 0x20 + (pmsg->chipaddr * pmsg->coreaddr) / 8; if (pmsg->datalen < expectlen) { applog(LOG_DEBUG, "%s: USB_INIT response too short on %s (%d < %d)", __func__, devpath, (int)pmsg->datalen, expectlen); goto err; } if (pmsg->data[8] != 0) { applog(LOG_DEBUG, "%s: USB_INIT failed on %s (err=%d)", __func__, devpath, pmsg->data[8]); goto err; } struct cgpu_info * const cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &hashfast_ums_drv, .device_path = strdup(devpath), .deven = DEV_ENABLED, .procs = (pmsg->chipaddr * pmsg->coreaddr), .threads = 1, .device_data = pmsg, }; return add_cgpu(cgpu); err: free(pmsg); return false; } static bool hashfast_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, hashfast_detect_one); } struct hashfast_dev_state { uint8_t cores_per_chip; int fd; struct hashfast_chip_state *chipstates; }; struct hashfast_chip_state { struct cgpu_info **coreprocs; hashfast_isn_t last_isn; }; struct hashfast_core_state { uint8_t chipaddr; uint8_t coreaddr; int next_device_id; uint8_t last_seq; hashfast_isn_t last_isn; hashfast_isn_t last2_isn; bool has_pending; unsigned queued; }; static bool hashfast_init(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu, *proc; struct hashfast_parsed_msg * const pmsg = dev->device_data; struct hashfast_dev_state * const devstate = malloc(sizeof(*devstate)); struct hashfast_chip_state * const chipstates = malloc(sizeof(*chipstates) * pmsg->chipaddr), *chipstate; struct hashfast_core_state * const corestates = malloc(sizeof(*corestates) * dev->procs), *cs; int i; *devstate = (struct hashfast_dev_state){ .chipstates = chipstates, .cores_per_chip = pmsg->coreaddr, .fd = serial_open(dev->device_path, 0, 1, true), }; for (i = 0; i < pmsg->chipaddr; ++i) { chipstate = &chipstates[i]; *chipstate = (struct hashfast_chip_state){ .coreprocs = malloc(sizeof(struct cgpu_info *) * pmsg->coreaddr), }; } for ((i = 0), (proc = dev); proc; ++i, (proc = proc->next_proc)) { struct thr_info * const thr = proc->thr[0]; const bool core_is_working = pmsg->data[0x20 + (i / 8)] & (1 << (i % 8)); if (!core_is_working) proc->deven = DEV_RECOVER_DRV; proc->device_data = devstate; thr->cgpu_data = cs = &corestates[i]; *cs = (struct hashfast_core_state){ .chipaddr = i 
/ pmsg->coreaddr, .coreaddr = i % pmsg->coreaddr, }; chipstates[cs->chipaddr].coreprocs[cs->coreaddr] = proc; } free(pmsg); // TODO: actual clock = [12,13] timer_set_now(&master_thr->tv_poll); return true; } static bool hashfast_queue_append(struct thr_info * const thr, struct work * const work) { struct cgpu_info * const proc = thr->cgpu; struct hashfast_dev_state * const devstate = proc->device_data; const int fd = devstate->fd; struct hashfast_core_state * const cs = thr->cgpu_data; struct hashfast_chip_state * const chipstate = &devstate->chipstates[cs->chipaddr]; const size_t cmdlen = HASHFAST_HEADER_SIZE + HASHFAST_HASH_SIZE; uint8_t cmd[cmdlen]; uint8_t * const hashdata = &cmd[HASHFAST_HEADER_SIZE]; hashfast_isn_t isn; uint8_t seq; if (cs->has_pending) { thr->queue_full = true; return false; } isn = ++chipstate->last_isn; seq = ++cs->last_seq; work->device_id = seq; cs->last2_isn = cs->last_isn; cs->last_isn = isn; hashfast_prepare_msg(cmd, HFOP_HASH, cs->chipaddr, cs->coreaddr, (cs->coreaddr << 8) | seq, 56); memcpy(&hashdata[ 0], work->midstate, 0x20); memcpy(&hashdata[0x20], &work->data[64], 0xc); memset(&hashdata[0x2c], '\0', 0xa); // starting_nonce, nonce_loops, ntime_loops hashdata[0x36] = 32; // search target (number of zero bits) hashdata[0x37] = 0; cs->has_pending = true; if (cmdlen != hashfast_write(fd, cmd, cmdlen)) return false; DL_APPEND(thr->work, work); if (cs->queued > HASHFAST_QUEUE_MEMORY) { struct work * const old_work = thr->work; DL_DELETE(thr->work, old_work); free_work(old_work); } else ++cs->queued; return true; } static void hashfast_queue_flush(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; struct hashfast_dev_state * const devstate = proc->device_data; const int fd = devstate->fd; struct hashfast_core_state * const cs = thr->cgpu_data; uint8_t cmd[HASHFAST_HEADER_SIZE]; uint16_t hdata = 2; if ((!thr->work) || stale_work(thr->work->prev, true)) { applog(LOG_DEBUG, "%"PRIpreprv": Flushing both active and pending work", proc->proc_repr); hdata |= 1; } else applog(LOG_DEBUG, "%"PRIpreprv": Flushing pending work", proc->proc_repr); hashfast_send_msg(fd, cmd, HFOP_ABORT, cs->chipaddr, cs->coreaddr, hdata, 0); } static struct cgpu_info *hashfast_find_proc(struct thr_info * const master_thr, int chipaddr, int coreaddr) { struct cgpu_info *proc = master_thr->cgpu; struct hashfast_dev_state * const devstate = proc->device_data; if (coreaddr >= devstate->cores_per_chip) return NULL; const unsigned chip_count = proc->procs / devstate->cores_per_chip; if (chipaddr >= chip_count) return NULL; struct hashfast_chip_state * const chipstate = &devstate->chipstates[chipaddr]; return chipstate->coreprocs[coreaddr]; } static hashfast_isn_t hashfast_get_isn(struct hashfast_chip_state * const chipstate, uint16_t hfseq) { const uint8_t coreaddr = hfseq >> 8; const uint8_t seq = hfseq & 0xff; struct cgpu_info * const proc = chipstate->coreprocs[coreaddr]; struct thr_info * const thr = proc->thr[0]; struct hashfast_core_state * const cs = thr->cgpu_data; if (cs->last_seq == seq) return cs->last_isn; if (cs->last_seq == (uint8_t)(seq + 1)) return cs->last2_isn; return 0; } static void hashfast_submit_nonce(struct thr_info * const thr, struct work * const work, const uint32_t nonce, const bool searched) { struct cgpu_info * const proc = thr->cgpu; struct hashfast_core_state * const cs = thr->cgpu_data; applog(LOG_DEBUG, "%"PRIpreprv": Found nonce for seq %02x (last=%02x): %08lx%s", proc->proc_repr, (unsigned)work->device_id, (unsigned)cs->last_seq, 
(unsigned long)nonce, searched ? " (searched)" : ""); submit_nonce(thr, work, nonce); } static bool hashfast_poll_msg(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu; struct hashfast_dev_state * const devstate = dev->device_data; const int fd = devstate->fd; struct hashfast_parsed_msg msg; if (!hashfast_parse_msg(fd, &msg)) return false; switch (msg.opcode) { case HFOP_NONCE: { const uint8_t *data = msg.data; for (int i = msg.datalen / 8; i; --i, (data = &data[8])) { const uint32_t nonce = (data[0] << 0) | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); const uint8_t seq = data[4]; const uint8_t coreaddr = data[5]; // uint32_t ntime = data[6] | ((data[7] & 0xf) << 8); const bool search = data[7] & 0x10; struct cgpu_info * const proc = hashfast_find_proc(master_thr, msg.chipaddr, coreaddr); if (unlikely(!proc)) { applog(LOG_ERR, "%s: Unknown chip/core address %u/%u", dev->dev_repr, (unsigned)msg.chipaddr, (unsigned)coreaddr); inc_hw_errors_only(master_thr); continue; } struct thr_info * const thr = proc->thr[0]; struct hashfast_core_state * const cs = thr->cgpu_data; struct work *work; DL_SEARCH_SCALAR(thr->work, work, device_id, seq); if (unlikely(!work)) { applog(LOG_WARNING, "%"PRIpreprv": Unknown seq %02x (last=%02x)", proc->proc_repr, (unsigned)seq, (unsigned)cs->last_seq); inc_hw_errors2(thr, NULL, &nonce); continue; } unsigned nonces_found = 1; hashfast_submit_nonce(thr, work, nonce, false); if (search) { for (int noffset = 1; noffset <= 0x80; ++noffset) { const uint32_t nonce2 = nonce + noffset; if (test_nonce(work, nonce2, false)) { hashfast_submit_nonce(thr, work, nonce2, true); ++nonces_found; } } if (!nonces_found) { inc_hw_errors_only(thr); applog(LOG_WARNING, "%"PRIpreprv": search=1, but failed to turn up any additional solutions", proc->proc_repr); } } hashes_done2(thr, 0x100000000 * nonces_found, NULL); } break; } case HFOP_STATUS: { const uint8_t *data = &msg.data[8]; struct cgpu_info *proc = hashfast_find_proc(master_thr, msg.chipaddr, 0); if (unlikely(!proc)) { applog(LOG_ERR, "%s: Unknown chip address %u", dev->dev_repr, (unsigned)msg.chipaddr); inc_hw_errors_only(master_thr); break; } struct hashfast_chip_state * const chipstate = &devstate->chipstates[msg.chipaddr]; hashfast_isn_t isn = hashfast_get_isn(chipstate, msg.hdata); int cores_uptodate, cores_active, cores_pending, cores_transitioned; cores_uptodate = cores_active = cores_pending = cores_transitioned = 0; for (int i = 0; i < devstate->cores_per_chip; ++i, (proc = proc->next_proc)) { struct thr_info * const thr = proc->thr[0]; struct hashfast_core_state * const cs = thr->cgpu_data; const uint8_t bits = data[i / 4] >> (2 * (i % 4)); const bool has_active = bits & 1; const bool has_pending = bits & 2; bool try_transition = true; if (cs->last_isn <= isn) ++cores_uptodate; else try_transition = false; if (has_active) ++cores_active; if (has_pending) ++cores_pending; else if (try_transition) { ++cores_transitioned; cs->has_pending = false; thr->queue_full = false; } } applog(LOG_DEBUG, "%s: STATUS from chipaddr=0x%02x with hdata=0x%04x (isn=0x%lx): total=%d uptodate=%d active=%d pending=%d transitioned=%d", dev->dev_repr, (unsigned)msg.chipaddr, (unsigned)msg.hdata, isn, devstate->cores_per_chip, cores_uptodate, cores_active, cores_pending, cores_transitioned); break; } } return true; } static void hashfast_poll(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu; struct timeval tv_timeout; timer_set_delay_from_now(&tv_timeout, 10000); while 
(true) { if (!hashfast_poll_msg(master_thr)) { applog(LOG_DEBUG, "%s poll: No more messages", dev->dev_repr); break; } if (timer_passed(&tv_timeout, NULL)) { applog(LOG_DEBUG, "%s poll: 10ms timeout met", dev->dev_repr); break; } } timer_set_delay_from_now(&master_thr->tv_poll, 100000); } struct device_drv hashfast_ums_drv = { .dname = "hashfast_ums", .name = "HFA", .lowl_match = hashfast_lowl_match, .lowl_probe = hashfast_lowl_probe, .thread_init = hashfast_init, .minerloop = minerloop_queue, .queue_append = hashfast_queue_append, .queue_flush = hashfast_queue_flush, .poll = hashfast_poll, }; bfgminer-bfgminer-3.10.0/driver-icarus.c000066400000000000000000001151561226556647300201270ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Xiangfu * Copyright 2012 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ /* * Those code should be works fine with V2 and V3 bitstream of Icarus. * Operation: * No detection implement. * Input: 64B = 32B midstate + 20B fill bytes + last 12 bytes of block head. * Return: send back 32bits immediately when Icarus found a valid nonce. * no query protocol implemented here, if no data send back in ~11.3 * seconds (full cover time on 32bit nonce range by 380MH/s speed) * just send another work. * Notice: * 1. Icarus will start calculate when you push a work to them, even they * are busy. * 2. The 2 FPGAs on Icarus will distribute the job, one will calculate the * 0 ~ 7FFFFFFF, another one will cover the 80000000 ~ FFFFFFFF. * 3. It's possible for 2 FPGAs both find valid nonce in the meantime, the 2 * valid nonce will all be send back. * 4. Icarus will stop work when: a valid nonce has been found or 32 bits * nonce range is completely calculated. 
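 *    (For reference, the ~11.3 second figure above is simply the full
 *    32-bit nonce range divided by the nominal speed:
 *    2^32 / 380 MH/s ≈ 11.3 seconds.)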
*/ #include "config.h" #include "miner.h" #include #include #include #include #include #include #include #include #include #ifndef WIN32 #include #include #include #ifndef O_CLOEXEC #define O_CLOEXEC 0 #endif #else #include #include #endif #ifdef HAVE_SYS_EPOLL_H #include #define HAVE_EPOLL #endif #include "compat.h" #include "dynclock.h" #include "icarus-common.h" #include "lowl-vcom.h" // The serial I/O speed - Linux uses a define 'B115200' in bits/termios.h #define ICARUS_IO_SPEED 115200 // The number of bytes in a nonce (always 4) // This is NOT the read-size for the Icarus driver // That is defined in ICARUS_INFO->read_size #define ICARUS_NONCE_SIZE 4 #define ASSERT1(condition) __maybe_unused static char sizeof_uint32_t_must_be_4[(condition)?1:-1] ASSERT1(sizeof(uint32_t) == 4); #define ICARUS_READ_TIME(baud, read_size) ((double)read_size * (double)8.0 / (double)(baud)) // Defined in deciseconds // There's no need to have this bigger, since the overhead/latency of extra work // is pretty small once you get beyond a 10s nonce range time and 10s also // means that nothing slower than 429MH/s can go idle so most icarus devices // will always mine without idling #define ICARUS_READ_COUNT_LIMIT_MAX 100 // In timing mode: Default starting value until an estimate can be obtained // 5 seconds allows for up to a ~840MH/s device #define ICARUS_READ_COUNT_TIMING (5 * TIME_FACTOR) // For a standard Icarus REV3 #define ICARUS_REV3_HASH_TIME 0.00000000264083 // Icarus Rev3 doesn't send a completion message when it finishes // the full nonce range, so to avoid being idle we must abort the // work (by starting a new work) shortly before it finishes // // Thus we need to estimate 2 things: // 1) How many hashes were done if the work was aborted // 2) How high can the timeout be before the Icarus is idle, // to minimise the number of work started // We set 2) to 'the calculated estimate' - 1 // to ensure the estimate ends before idle // // The simple calculation used is: // Tn = Total time in seconds to calculate n hashes // Hs = seconds per hash // Xn = number of hashes // W = code overhead per work // // Rough but reasonable estimate: // Tn = Hs * Xn + W (of the form y = mx + b) // // Thus: // Line of best fit (using least squares) // // Hs = (n*Sum(XiTi)-Sum(Xi)*Sum(Ti))/(n*Sum(Xi^2)-Sum(Xi)^2) // W = Sum(Ti)/n - (Hs*Sum(Xi))/n // // N.B. 
W is less when aborting work since we aren't waiting for the reply // to be transferred back (ICARUS_READ_TIME) // Calculating the hashes aborted at n seconds is thus just n/Hs // (though this is still a slight overestimate due to code delays) // // Both below must be exceeded to complete a set of data // Minimum how long after the first, the last data point must be #define HISTORY_SEC 60 // Minimum how many points a single ICARUS_HISTORY should have #define MIN_DATA_COUNT 5 // The value above used is doubled each history until it exceeds: #define MAX_MIN_DATA_COUNT 100 #if (TIME_FACTOR != 10) #error TIME_FACTOR must be 10 #endif static struct timeval history_sec = { HISTORY_SEC, 0 }; static const char *MODE_DEFAULT_STR = "default"; static const char *MODE_SHORT_STR = "short"; static const char *MODE_SHORT_STREQ = "short="; static const char *MODE_LONG_STR = "long"; static const char *MODE_LONG_STREQ = "long="; static const char *MODE_VALUE_STR = "value"; static const char *MODE_UNKNOWN_STR = "unknown"; #define END_CONDITION 0x0000ffff #define DEFAULT_DETECT_THRESHOLD 1 // Looking for options in --icarus-timing and --icarus-options: // // Code increments this each time we start to look at a device // However, this means that if other devices are checked by // the Icarus code (e.g. BFL) they will count in the option offset // // This, however, is deterministic so that's OK // // If we were to increment after successfully finding an Icarus // that would be random since an Icarus may fail and thus we'd // not be able to predict the option order // // This also assumes that serial_detect() checks them sequentially // and in the order specified on the command line // static int option_offset = -1; BFG_REGISTER_DRIVER(icarus_drv) extern void convert_icarus_to_cairnsmore(struct cgpu_info *); static void rev(unsigned char *s, size_t l) { size_t i, j; unsigned char t; for (i = 0, j = l - 1; i < j; i++, j--) { t = s[i]; s[i] = s[j]; s[j] = t; } } #define icarus_open2(devpath, baud, purge) serial_open(devpath, baud, ICARUS_READ_FAULT_DECISECONDS, purge) #define icarus_open(devpath, baud) icarus_open2(devpath, baud, false) int icarus_gets(unsigned char *buf, int fd, struct timeval *tv_finish, struct thr_info *thr, int read_count, int read_size) { ssize_t ret = 0; int rc = 0; int epollfd = -1; int epoll_timeout = ICARUS_READ_FAULT_DECISECONDS * 100; int read_amount = read_size; bool first = true; #ifdef HAVE_EPOLL struct epoll_event ev = { .events = EPOLLIN, .data.fd = fd, }; struct epoll_event evr[2]; if (thr && thr->work_restart_notifier[1] != -1) { epollfd = epoll_create(2); if (epollfd != -1) { if (-1 == epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev)) { close(epollfd); epollfd = -1; } { ev.data.fd = thr->work_restart_notifier[0]; if (-1 == epoll_ctl(epollfd, EPOLL_CTL_ADD, thr->work_restart_notifier[0], &ev)) applog(LOG_ERR, "Icarus: Error adding work restart fd to epoll"); else { epoll_timeout *= read_count; read_count = 1; } } } else applog(LOG_ERR, "Icarus: Error creating epoll"); } #endif // Read reply 1 byte at a time to get earliest tv_finish while (true) { #ifdef HAVE_EPOLL if (epollfd != -1 && (ret = epoll_wait(epollfd, evr, 2, epoll_timeout)) != -1) { if (ret == 1 && evr[0].data.fd == fd) ret = read(fd, buf, 1); else { if (ret) notifier_read(thr->work_restart_notifier); ret = 0; } } else #endif ret = read(fd, buf, 1); if (ret < 0) return ICA_GETS_ERROR; if (first) cgtime(tv_finish); if (ret >= read_amount) { if (epollfd != -1) close(epollfd); return ICA_GETS_OK; } if (ret > 0) { buf += ret; 
read_amount -= ret; first = false; continue; } if (thr && thr->work_restart) { if (epollfd != -1) close(epollfd); applog(LOG_DEBUG, "Icarus Read: Interrupted by work restart"); return ICA_GETS_RESTART; } rc++; if (rc >= read_count) { if (epollfd != -1) close(epollfd); applog(LOG_DEBUG, "Icarus Read: No data in %.2f seconds", (float)rc * epoll_timeout / 1000.); return ICA_GETS_TIMEOUT; } } } int icarus_write(int fd, const void *buf, size_t bufLen) { size_t ret; if (unlikely(fd == -1)) return 1; ret = write(fd, buf, bufLen); if (unlikely(ret != bufLen)) return 1; return 0; } #define icarus_close(fd) serial_close(fd) static void do_icarus_close(struct thr_info *thr) { struct cgpu_info *icarus = thr->cgpu; const int fd = icarus->device_fd; if (fd == -1) return; icarus_close(fd); icarus->device_fd = -1; } static const char *timing_mode_str(enum timing_mode timing_mode) { switch(timing_mode) { case MODE_DEFAULT: return MODE_DEFAULT_STR; case MODE_SHORT: return MODE_SHORT_STR; case MODE_LONG: return MODE_LONG_STR; case MODE_VALUE: return MODE_VALUE_STR; default: return MODE_UNKNOWN_STR; } } static void set_timing_mode(int this_option_offset, struct cgpu_info *icarus) { struct ICARUS_INFO *info = icarus->device_data; double Hs; char buf[BUFSIZ+1]; char *ptr, *comma, *eq; size_t max; int i; if (opt_icarus_timing == NULL) buf[0] = '\0'; else { ptr = opt_icarus_timing; for (i = 0; i < this_option_offset; i++) { comma = strchr(ptr, ','); if (comma == NULL) break; ptr = comma + 1; } comma = strchr(ptr, ','); if (comma == NULL) max = strlen(ptr); else max = comma - ptr; if (max > BUFSIZ) max = BUFSIZ; strncpy(buf, ptr, max); buf[max] = '\0'; } info->read_count = 0; info->read_count_limit = 0; // 0 = no limit if (strcasecmp(buf, MODE_SHORT_STR) == 0) { // short info->read_count = ICARUS_READ_COUNT_TIMING; info->timing_mode = MODE_SHORT; info->do_icarus_timing = true; } else if (strncasecmp(buf, MODE_SHORT_STREQ, strlen(MODE_SHORT_STREQ)) == 0) { // short=limit info->read_count = ICARUS_READ_COUNT_TIMING; info->timing_mode = MODE_SHORT; info->do_icarus_timing = true; info->read_count_limit = atoi(&buf[strlen(MODE_SHORT_STREQ)]); if (info->read_count_limit < 0) info->read_count_limit = 0; if (info->read_count_limit > ICARUS_READ_COUNT_LIMIT_MAX) info->read_count_limit = ICARUS_READ_COUNT_LIMIT_MAX; } else if (strcasecmp(buf, MODE_LONG_STR) == 0) { // long info->read_count = ICARUS_READ_COUNT_TIMING; info->timing_mode = MODE_LONG; info->do_icarus_timing = true; } else if (strncasecmp(buf, MODE_LONG_STREQ, strlen(MODE_LONG_STREQ)) == 0) { // long=limit info->read_count = ICARUS_READ_COUNT_TIMING; info->timing_mode = MODE_LONG; info->do_icarus_timing = true; info->read_count_limit = atoi(&buf[strlen(MODE_LONG_STREQ)]); if (info->read_count_limit < 0) info->read_count_limit = 0; if (info->read_count_limit > ICARUS_READ_COUNT_LIMIT_MAX) info->read_count_limit = ICARUS_READ_COUNT_LIMIT_MAX; } else if ((Hs = atof(buf)) != 0) { // ns[=read_count] info->Hs = Hs / NANOSEC; info->fullnonce = info->Hs * (((double)0xffffffff) + 1); if ((eq = strchr(buf, '=')) != NULL) info->read_count = atoi(eq+1); if (info->read_count < 1) info->read_count = (int)(info->fullnonce * TIME_FACTOR) - 1; if (unlikely(info->read_count < 1)) info->read_count = 1; info->timing_mode = MODE_VALUE; info->do_icarus_timing = false; } else { // Anything else in buf just uses DEFAULT mode info->fullnonce = info->Hs * (((double)0xffffffff) + 1); if ((eq = strchr(buf, '=')) != NULL) info->read_count = atoi(eq+1); int def_read_count = 
ICARUS_READ_COUNT_TIMING; if (info->timing_mode == MODE_DEFAULT) { if (icarus->drv == &icarus_drv) { info->do_default_detection = 0x10; } else { def_read_count = (int)(info->fullnonce * TIME_FACTOR) - 1; } info->do_icarus_timing = false; } if (info->read_count < 1) info->read_count = def_read_count; } info->min_data_count = MIN_DATA_COUNT; applog(LOG_DEBUG, "%"PRIpreprv": Init: mode=%s read_count=%d limit=%dms Hs=%e", icarus->proc_repr, timing_mode_str(info->timing_mode), info->read_count, info->read_count_limit, info->Hs); } static uint32_t mask(int work_division) { uint32_t nonce_mask = 0x7fffffff; // yes we can calculate these, but this way it's easy to see what they are switch (work_division) { case 1: nonce_mask = 0xffffffff; break; case 2: nonce_mask = 0x7fffffff; break; case 4: nonce_mask = 0x3fffffff; break; case 8: nonce_mask = 0x1fffffff; break; default: quit(1, "Invalid2 icarus-options for work_division (%d) must be 1, 2, 4 or 8", work_division); } return nonce_mask; } static void get_options(int this_option_offset, struct ICARUS_INFO *info) { int *baud = &info->baud; int *work_division = &info->work_division; int *fpga_count = &info->fpga_count; char buf[BUFSIZ+1]; char *ptr, *comma, *colon, *colon2; size_t max; int i, tmp; if (opt_icarus_options == NULL) buf[0] = '\0'; else { ptr = opt_icarus_options; for (i = 0; i < this_option_offset; i++) { comma = strchr(ptr, ','); if (comma == NULL) break; ptr = comma + 1; } comma = strchr(ptr, ','); if (comma == NULL) max = strlen(ptr); else max = comma - ptr; if (max > BUFSIZ) max = BUFSIZ; strncpy(buf, ptr, max); buf[max] = '\0'; } if (*buf) { colon = strchr(buf, ':'); if (colon) *(colon++) = '\0'; if (*buf) { tmp = atoi(buf); if (!valid_baud(*baud = tmp)) quit(1, "Invalid icarus-options for baud (%s)", buf); } if (colon && *colon) { colon2 = strchr(colon, ':'); if (colon2) *(colon2++) = '\0'; if (*colon) { info->user_set |= 1; tmp = atoi(colon); if (tmp == 1 || tmp == 2 || tmp == 4 || tmp == 8) { *work_division = tmp; *fpga_count = tmp; // default to the same } else { quit(1, "Invalid icarus-options for work_division (%s) must be 1, 2, 4 or 8", colon); } } if (colon2 && *colon2) { colon = strchr(colon2, ':'); if (colon) *(colon++) = '\0'; if (*colon2) { info->user_set |= 2; tmp = atoi(colon2); if (tmp > 0 && tmp <= *work_division) *fpga_count = tmp; else { quit(1, "Invalid icarus-options for fpga_count (%s) must be >0 and <=work_division (%d)", colon2, *work_division); } } if (colon && *colon) { colon2 = strchr(colon, '-') ?: ""; if (*colon2) *(colon2++) = '\0'; if (strchr(colon, 'r')) info->quirk_reopen = 2; if (strchr(colon2, 'r')) info->quirk_reopen = 0; } } } } } // Number of bytes remaining after reading a nonce from Icarus int icarus_excess_nonce_size(int fd, struct ICARUS_INFO *info) { // How big a buffer? int excess_size = info->read_size - ICARUS_NONCE_SIZE; // Try to read one more to ensure the device doesn't return // more than we want for this driver excess_size++; unsigned char excess_bin[excess_size]; // Read excess_size from Icarus struct timeval tv_now; timer_set_now(&tv_now); //icarus_gets(excess_bin, fd, &tv_now, NULL, 1, excess_size); int bytes_read = read(fd, excess_bin, excess_size); // Number of bytes that were still available return bytes_read; } bool icarus_detect_custom(const char *devpath, struct device_drv *api, struct ICARUS_INFO *info) { int this_option_offset = ++option_offset; struct timeval tv_start, tv_finish; int fd; // Block 171874 nonce = (0xa2870100) = 0x000187a2 // N.B. 
golden_ob MUST take less time to calculate // than the timeout set in icarus_open() // This one takes ~0.53ms on Rev3 Icarus const char golden_ob[] = "4679ba4ec99876bf4bfe086082b40025" "4df6c356451471139a3afa71e48f544a" "00000000000000000000000000000000" "0000000087320b1a1426674f2fa722ce"; /* NOTE: This gets sent to basically every port specified in --scan-serial, * even ones that aren't Icarus; be sure they can all handle it, when * this is changed... * BitForce: Ignores entirely * ModMiner: Starts (useless) work, gets back to clean state */ const char golden_nonce[] = "000187a2"; unsigned char ob_bin[64], nonce_bin[ICARUS_NONCE_SIZE]; char nonce_hex[(sizeof(nonce_bin) * 2) + 1]; get_options(this_option_offset, info); int baud = info->baud; int work_division = info->work_division; int fpga_count = info->fpga_count; applog(LOG_DEBUG, "Icarus Detect: Attempting to open %s", devpath); fd = icarus_open2(devpath, baud, true); if (unlikely(fd == -1)) { applog(LOG_DEBUG, "Icarus Detect: Failed to open %s", devpath); return false; } // Set a default so that individual drivers need not specify // e.g. Cairnsmore if (info->read_size == 0) info->read_size = ICARUS_DEFAULT_READ_SIZE; hex2bin(ob_bin, golden_ob, sizeof(ob_bin)); icarus_write(fd, ob_bin, sizeof(ob_bin)); cgtime(&tv_start); memset(nonce_bin, 0, sizeof(nonce_bin)); // Do not use info->read_size here, instead read exactly ICARUS_NONCE_SIZE // We will then compare the bytes left in fd with info->read_size to determine // if this is a valid device icarus_gets(nonce_bin, fd, &tv_finish, NULL, 1, ICARUS_NONCE_SIZE); // How many bytes were left after reading the above nonce int bytes_left = icarus_excess_nonce_size(fd, info); icarus_close(fd); bin2hex(nonce_hex, nonce_bin, sizeof(nonce_bin)); if (strncmp(nonce_hex, golden_nonce, 8)) { applog(LOG_DEBUG, "Icarus Detect: " "Test failed at %s: get %s, should: %s", devpath, nonce_hex, golden_nonce); return false; } if (info->read_size - ICARUS_NONCE_SIZE != bytes_left) { applog(LOG_DEBUG, "Icarus Detect: " "Test failed at %s: expected %d bytes, got %d", devpath, info->read_size, ICARUS_NONCE_SIZE + bytes_left); return false; } applog(LOG_DEBUG, "Icarus Detect: " "Test succeeded at %s: got %s", devpath, nonce_hex); if (serial_claim_v(devpath, api)) return false; /* We have a real Icarus! */ struct cgpu_info *icarus; icarus = calloc(1, sizeof(struct cgpu_info)); icarus->drv = api; icarus->device_path = strdup(devpath); icarus->device_fd = -1; icarus->threads = 1; add_cgpu(icarus); applog(LOG_INFO, "Found %"PRIpreprv" at %s", icarus->proc_repr, devpath); applog(LOG_DEBUG, "%"PRIpreprv": Init: baud=%d work_division=%d fpga_count=%d", icarus->proc_repr, baud, work_division, fpga_count); icarus->device_data = info; timersub(&tv_finish, &tv_start, &(info->golden_tv)); set_timing_mode(this_option_offset, icarus); return true; } static bool icarus_detect_one(const char *devpath) { struct ICARUS_INFO *info = calloc(1, sizeof(struct ICARUS_INFO)); if (unlikely(!info)) quit(1, "Failed to malloc ICARUS_INFO"); // TODO: try some higher speeds with the Icarus and BFL to see // if they support them and if setting them makes any difference // N.B. 
B3000000 doesn't work on Icarus info->baud = ICARUS_IO_SPEED; info->quirk_reopen = 1; info->Hs = ICARUS_REV3_HASH_TIME; info->timing_mode = MODE_DEFAULT; info->read_size = ICARUS_DEFAULT_READ_SIZE; if (!icarus_detect_custom(devpath, &icarus_drv, info)) { free(info); return false; } return true; } static bool icarus_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, icarus_detect_one); } static bool icarus_prepare(struct thr_info *thr) { struct cgpu_info *icarus = thr->cgpu; struct ICARUS_INFO *info = icarus->device_data; icarus->device_fd = -1; int fd = icarus_open2(icarus->device_path, info->baud, true); if (unlikely(-1 == fd)) { applog(LOG_ERR, "Failed to open Icarus on %s", icarus->device_path); return false; } icarus->device_fd = fd; applog(LOG_INFO, "Opened Icarus on %s", icarus->device_path); struct icarus_state *state; thr->cgpu_data = state = calloc(1, sizeof(*state)); state->firstrun = true; #ifdef HAVE_EPOLL int epollfd = epoll_create(2); if (epollfd != -1) { close(epollfd); notifier_init(thr->work_restart_notifier); } #endif icarus->status = LIFE_INIT2; return true; } static bool icarus_init(struct thr_info *thr) { struct cgpu_info *icarus = thr->cgpu; struct ICARUS_INFO *info = icarus->device_data; int fd = icarus->device_fd; if (!info->work_division) { struct timeval tv_finish; // For reading the nonce from Icarus unsigned char res_bin[info->read_size]; // For storing the the 32-bit nonce uint32_t res; applog(LOG_DEBUG, "%"PRIpreprv": Work division not specified - autodetecting", icarus->proc_repr); // Special packet to probe work_division unsigned char pkt[64] = "\x2e\x4c\x8f\x91\xfd\x59\x5d\x2d\x7e\xa2\x0a\xaa\xcb\x64\xa2\xa0" "\x43\x82\x86\x02\x77\xcf\x26\xb6\xa1\xee\x04\xc5\x6a\x5b\x50\x4a" "BFGMiner Probe\0\0" "BFG\0\x64\x61\x01\x1a\xc9\x06\xa9\x51\xfb\x9b\x3c\x73"; icarus_write(fd, pkt, sizeof(pkt)); memset(res_bin, 0, sizeof(res_bin)); if (ICA_GETS_OK == icarus_gets(res_bin, fd, &tv_finish, NULL, info->read_count, info->read_size)) { memcpy(&res, res_bin, sizeof(res)); res = be32toh(res); } else res = 0; switch (res) { case 0x04C0FDB4: info->work_division = 1; break; case 0x82540E46: info->work_division = 2; break; case 0x417C0F36: info->work_division = 4; break; case 0x60C994D5: info->work_division = 8; break; default: applog(LOG_ERR, "%"PRIpreprv": Work division autodetection failed (assuming 2): got %08x", icarus->proc_repr, res); info->work_division = 2; } applog(LOG_DEBUG, "%"PRIpreprv": Work division autodetection got %08x (=%d)", icarus->proc_repr, res, info->work_division); } if (!info->fpga_count) info->fpga_count = info->work_division; info->nonce_mask = mask(info->work_division); return true; } static bool icarus_reopen(struct cgpu_info *icarus, struct icarus_state *state, int *fdp) { struct ICARUS_INFO *info = icarus->device_data; // Reopen the serial port to workaround a USB-host-chipset-specific issue with the Icarus's buggy USB-UART do_icarus_close(icarus->thr[0]); *fdp = icarus->device_fd = icarus_open(icarus->device_path, info->baud); if (unlikely(-1 == *fdp)) { applog(LOG_ERR, "%"PRIpreprv": Failed to reopen on %s", icarus->proc_repr, icarus->device_path); dev_error(icarus, REASON_DEV_COMMS_ERROR); state->firstrun = true; return false; } return true; } static bool icarus_job_prepare(struct thr_info *thr, struct work *work, __maybe_unused uint64_t max_nonce) { struct cgpu_info * const icarus = thr->cgpu; struct icarus_state * const state = thr->cgpu_data; uint8_t * const ob_bin = state->ob_bin; 
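	/* Build the 64-byte Icarus work block: bytes 0-31 hold the midstate and
	 * bytes 52-63 the last 12 bytes of the block header (merkle tail, ntime,
	 * nbits); bytes 32-51 are fill.  Both pieces are byte-reversed before
	 * being sent to the device. */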
memcpy(ob_bin, work->midstate, 32); memcpy(ob_bin + 52, work->data + 64, 12); if (!(memcmp(&ob_bin[56], "\xff\xff\xff\xff", 4) || memcmp(&ob_bin, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", 32))) { // This sequence is used on cairnsmore bitstreams for commands, NEVER send it otherwise applog(LOG_WARNING, "%"PRIpreprv": Received job attempting to send a command, corrupting it!", icarus->proc_repr); ob_bin[56] = 0; } rev(ob_bin, 32); rev(ob_bin + 52, 12); return true; } static bool icarus_job_start(struct thr_info *thr) { struct cgpu_info *icarus = thr->cgpu; struct ICARUS_INFO *info = icarus->device_data; struct icarus_state *state = thr->cgpu_data; const uint8_t * const ob_bin = state->ob_bin; int fd = icarus->device_fd; int ret; // Handle dynamic clocking for "subclass" devices // This needs to run before sending next job, since it hashes the command too if (info->dclk.freqM && likely(!state->firstrun)) { dclk_preUpdate(&info->dclk); dclk_updateFreq(&info->dclk, info->dclk_change_clock_func, thr); } cgtime(&state->tv_workstart); ret = icarus_write(fd, ob_bin, 64); if (ret) { do_icarus_close(thr); applog(LOG_ERR, "%"PRIpreprv": Comms error (werr=%d)", icarus->proc_repr, ret); dev_error(icarus, REASON_DEV_COMMS_ERROR); return false; /* This should never happen */ } if (opt_debug) { char ob_hex[129]; bin2hex(ob_hex, ob_bin, 64); applog(LOG_DEBUG, "%"PRIpreprv" sent: %s", icarus->proc_repr, ob_hex); } return true; } static struct work *icarus_process_worknonce(struct icarus_state *state, uint32_t *nonce) { *nonce = be32toh(*nonce); if (test_nonce(state->last_work, *nonce, false)) return state->last_work; if (likely(state->last2_work && test_nonce(state->last2_work, *nonce, false))) return state->last2_work; return NULL; } static void handle_identify(struct thr_info * const thr, int ret, const bool was_first_run) { const struct cgpu_info * const icarus = thr->cgpu; const struct ICARUS_INFO * const info = icarus->device_data; struct icarus_state * const state = thr->cgpu_data; int fd = icarus->device_fd; struct timeval tv_now; double delapsed; // For reading the nonce from Icarus unsigned char nonce_bin[info->read_size]; // For storing the the 32-bit nonce uint32_t nonce; if (fd == -1) return; // If identify is requested (block erupters): // 1. Don't start the next job right away (above) // 2. Wait for the current job to complete 100% if (!was_first_run) { applog(LOG_DEBUG, "%"PRIpreprv": Identify: Waiting for current job to finish", icarus->proc_repr); while (true) { cgtime(&tv_now); delapsed = tdiff(&tv_now, &state->tv_workstart); if (delapsed + 0.1 > info->fullnonce) break; // Try to get more nonces (ignoring work restart) memset(nonce_bin, 0, sizeof(nonce_bin)); ret = icarus_gets(nonce_bin, fd, &tv_now, NULL, (info->fullnonce - delapsed) * 10, info->read_size); if (ret == ICA_GETS_OK) { memcpy(&nonce, nonce_bin, sizeof(nonce)); nonce = be32toh(nonce); submit_nonce(thr, state->last_work, nonce); } } } else applog(LOG_DEBUG, "%"PRIpreprv": Identify: Current job should already be finished", icarus->proc_repr); // 3. Delay 3 more seconds applog(LOG_DEBUG, "%"PRIpreprv": Identify: Leaving idle for 3 seconds", icarus->proc_repr); cgsleep_ms(3000); // Check for work restart in the meantime if (thr->work_restart) { applog(LOG_DEBUG, "%"PRIpreprv": Identify: Work restart requested during delay", icarus->proc_repr); goto no_job_start; } // 4. 
Start next job if (!state->firstrun) { applog(LOG_DEBUG, "%"PRIpreprv": Identify: Starting next job", icarus->proc_repr); if (!icarus_job_start(thr)) no_job_start: state->firstrun = true; } state->identify = false; } static void icarus_transition_work(struct icarus_state *state, struct work *work) { if (state->last2_work) free_work(state->last2_work); state->last2_work = state->last_work; state->last_work = copy_work(work); } static int64_t icarus_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce) { struct cgpu_info *icarus; int fd; int ret; struct ICARUS_INFO *info; struct work *nonce_work; int64_t hash_count; struct timeval tv_start = {.tv_sec=0}, elapsed; struct timeval tv_history_start, tv_history_finish; double Ti, Xi; int i; bool was_hw_error = false; bool was_first_run; struct ICARUS_HISTORY *history0, *history; int count; double Hs, W, fullnonce; int read_count; bool limited; int64_t estimate_hashes; uint32_t values; int64_t hash_count_range; elapsed.tv_sec = elapsed.tv_usec = 0; icarus = thr->cgpu; struct icarus_state *state = thr->cgpu_data; was_first_run = state->firstrun; icarus_job_prepare(thr, work, max_nonce); // Wait for the previous run's result fd = icarus->device_fd; info = icarus->device_data; // For reading the nonce from Icarus unsigned char nonce_bin[info->read_size]; // For storing the the 32-bit nonce uint32_t nonce; if (unlikely(fd == -1) && !icarus_reopen(icarus, state, &fd)) return -1; if (!state->firstrun) { if (state->changework) { state->changework = false; ret = ICA_GETS_RESTART; } else { read_count = info->read_count; keepwaiting: /* Icarus will return info->read_size bytes nonces or nothing */ memset(nonce_bin, 0, sizeof(nonce_bin)); ret = icarus_gets(nonce_bin, fd, &state->tv_workfinish, thr, read_count, info->read_size); switch (ret) { case ICA_GETS_RESTART: // The prepared work is invalid, and the current work is abandoned // Go back to the main loop to get the next work, and stuff // Returning to the main loop will clear work_restart, so use a flag... 
state->changework = true; return 0; case ICA_GETS_ERROR: do_icarus_close(thr); applog(LOG_ERR, "%"PRIpreprv": Comms error (rerr)", icarus->proc_repr); dev_error(icarus, REASON_DEV_COMMS_ERROR); if (!icarus_reopen(icarus, state, &fd)) return -1; break; case ICA_GETS_TIMEOUT: if (info->quirk_reopen == 1 && !icarus_reopen(icarus, state, &fd)) return -1; case ICA_GETS_OK: break; } } tv_start = state->tv_workstart; timersub(&state->tv_workfinish, &tv_start, &elapsed); } else { if (fd == -1 && !icarus_reopen(icarus, state, &fd)) return -1; // First run; no nonce, no hashes done ret = ICA_GETS_ERROR; } #ifndef WIN32 tcflush(fd, TCOFLUSH); #endif if (ret == ICA_GETS_OK) { memcpy(&nonce, nonce_bin, sizeof(nonce)); nonce_work = icarus_process_worknonce(state, &nonce); if (likely(nonce_work)) { if (nonce_work == state->last2_work) { // nonce was for the last job; submit and keep processing the current one submit_nonce(thr, nonce_work, nonce); goto keepwaiting; } if (info->continue_search) { read_count = info->read_count - ((timer_elapsed_us(&state->tv_workstart, NULL) / (1000000 / TIME_FACTOR)) + 1); if (read_count) { submit_nonce(thr, nonce_work, nonce); goto keepwaiting; } } } else was_hw_error = true; } // Handle dynamic clocking for "subclass" devices // This needs to run before sending next job, since it hashes the command too if (info->dclk.freqM && likely(ret == ICA_GETS_OK || ret == ICA_GETS_TIMEOUT)) { int qsec = ((4 * elapsed.tv_sec) + (elapsed.tv_usec / 250000)) ?: 1; for (int n = qsec; n; --n) dclk_gotNonces(&info->dclk); if (was_hw_error) dclk_errorCount(&info->dclk, qsec); } // Force a USB close/reopen on any hw error if (was_hw_error && info->quirk_reopen != 2) { if (!icarus_reopen(icarus, state, &fd)) state->firstrun = true; } if (unlikely(state->identify)) { // Delay job start until later... } else if (unlikely(icarus->deven != DEV_ENABLED || !icarus_job_start(thr))) state->firstrun = true; if (info->quirk_reopen == 2 && !icarus_reopen(icarus, state, &fd)) state->firstrun = true; work->blk.nonce = 0xffffffff; if (ret == ICA_GETS_ERROR) { state->firstrun = false; icarus_transition_work(state, work); hash_count = 0; goto out; } // OK, done starting Icarus's next job... now process the last run's result! 
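	/* For an aborted run the hash count below is estimated as
	 * elapsed_seconds / Hs; e.g. with the stock Rev3 value of ~2.64ns per
	 * hash, a 5 second run is credited roughly 1.9GH. */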
// aborted before becoming idle, get new work if (ret == ICA_GETS_TIMEOUT || ret == ICA_GETS_RESTART) { icarus_transition_work(state, work); // ONLY up to just when it aborted // We didn't read a reply so we don't subtract ICARUS_READ_TIME estimate_hashes = ((double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000)) / info->Hs; // If some Serial-USB delay allowed the full nonce range to // complete it can't have done more than a full nonce if (unlikely(estimate_hashes > 0xffffffff)) estimate_hashes = 0xffffffff; applog(LOG_DEBUG, "%"PRIpreprv" no nonce = 0x%08"PRIx64" hashes (%"PRId64".%06lus)", icarus->proc_repr, (uint64_t)estimate_hashes, (int64_t)elapsed.tv_sec, (unsigned long)elapsed.tv_usec); hash_count = estimate_hashes; goto out; } // Only ICA_GETS_OK gets here if (likely(!was_hw_error)) submit_nonce(thr, nonce_work, nonce); else inc_hw_errors(thr, state->last_work, nonce); icarus_transition_work(state, work); hash_count = (nonce & info->nonce_mask); hash_count++; hash_count *= info->fpga_count; applog(LOG_DEBUG, "%"PRIpreprv" nonce = 0x%08x = 0x%08" PRIx64 " hashes (%"PRId64".%06lus)", icarus->proc_repr, nonce, (uint64_t)hash_count, (int64_t)elapsed.tv_sec, (unsigned long)elapsed.tv_usec); if (info->do_default_detection && elapsed.tv_sec >= DEFAULT_DETECT_THRESHOLD) { int MHs = (double)hash_count / ((double)elapsed.tv_sec * 1e6 + (double)elapsed.tv_usec); --info->do_default_detection; applog(LOG_DEBUG, "%"PRIpreprv": Autodetect device speed: %d MH/s", icarus->proc_repr, MHs); if (MHs <= 370 || MHs > 420) { // Not a real Icarus: enable short timing applog(LOG_WARNING, "%"PRIpreprv": Seems too %s to be an Icarus; calibrating with short timing", icarus->proc_repr, MHs>380?"fast":"slow"); info->timing_mode = MODE_SHORT; info->do_icarus_timing = true; info->do_default_detection = 0; } else if (MHs <= 380) { // Real Icarus? if (!info->do_default_detection) { applog(LOG_DEBUG, "%"PRIpreprv": Seems to be a real Icarus", icarus->proc_repr); info->read_count = (int)(info->fullnonce * TIME_FACTOR) - 1; } } else if (MHs <= 420) { // Enterpoint Cairnsmore1 size_t old_repr_len = strlen(icarus->proc_repr); char old_repr[old_repr_len + 1]; strcpy(old_repr, icarus->proc_repr); convert_icarus_to_cairnsmore(icarus); info->do_default_detection = 0; applog(LOG_WARNING, "%"PRIpreprv": Detected Cairnsmore1 device, upgrading driver to %"PRIpreprv, old_repr, icarus->proc_repr); } } // Ignore possible end condition values ... 
and hw errors // TODO: set limitations on calculated values depending on the device // to avoid crap values caused by CPU/Task Switching/Swapping/etc if (info->do_icarus_timing && !was_hw_error && ((nonce & info->nonce_mask) > END_CONDITION) && ((nonce & info->nonce_mask) < (info->nonce_mask & ~END_CONDITION))) { cgtime(&tv_history_start); history0 = &(info->history[0]); if (history0->values == 0) timeradd(&tv_start, &history_sec, &(history0->finish)); Ti = (double)(elapsed.tv_sec) + ((double)(elapsed.tv_usec))/((double)1000000) - ((double)ICARUS_READ_TIME(info->baud, info->read_size)); Xi = (double)hash_count; history0->sumXiTi += Xi * Ti; history0->sumXi += Xi; history0->sumTi += Ti; history0->sumXi2 += Xi * Xi; history0->values++; if (history0->hash_count_max < hash_count) history0->hash_count_max = hash_count; if (history0->hash_count_min > hash_count || history0->hash_count_min == 0) history0->hash_count_min = hash_count; if (history0->values >= info->min_data_count && timercmp(&tv_start, &(history0->finish), >)) { for (i = INFO_HISTORY; i > 0; i--) memcpy(&(info->history[i]), &(info->history[i-1]), sizeof(struct ICARUS_HISTORY)); // Initialise history0 to zero for summary calculation memset(history0, 0, sizeof(struct ICARUS_HISTORY)); // We just completed a history data set // So now recalc read_count based on the whole history thus we will // initially get more accurate until it completes INFO_HISTORY // total data sets count = 0; for (i = 1 ; i <= INFO_HISTORY; i++) { history = &(info->history[i]); if (history->values >= MIN_DATA_COUNT) { count++; history0->sumXiTi += history->sumXiTi; history0->sumXi += history->sumXi; history0->sumTi += history->sumTi; history0->sumXi2 += history->sumXi2; history0->values += history->values; if (history0->hash_count_max < history->hash_count_max) history0->hash_count_max = history->hash_count_max; if (history0->hash_count_min > history->hash_count_min || history0->hash_count_min == 0) history0->hash_count_min = history->hash_count_min; } } // All history data Hs = (history0->values*history0->sumXiTi - history0->sumXi*history0->sumTi) / (history0->values*history0->sumXi2 - history0->sumXi*history0->sumXi); W = history0->sumTi/history0->values - Hs*history0->sumXi/history0->values; hash_count_range = history0->hash_count_max - history0->hash_count_min; values = history0->values; // Initialise history0 to zero for next data set memset(history0, 0, sizeof(struct ICARUS_HISTORY)); fullnonce = W + Hs * (((double)0xffffffff) + 1); read_count = (int)(fullnonce * TIME_FACTOR) - 1; if (info->read_count_limit > 0 && read_count > info->read_count_limit) { read_count = info->read_count_limit; limited = true; } else limited = false; info->Hs = Hs; info->read_count = read_count; info->fullnonce = fullnonce; info->count = count; info->W = W; info->values = values; info->hash_count_range = hash_count_range; if (info->min_data_count < MAX_MIN_DATA_COUNT) info->min_data_count *= 2; else if (info->timing_mode == MODE_SHORT) info->do_icarus_timing = false; // applog(LOG_DEBUG, "%"PRIpreprv" Re-estimate: read_count=%d%s fullnonce=%fs history count=%d Hs=%e W=%e values=%d hash range=0x%08lx min data count=%u", icarus->proc_repr, read_count, limited ? " (limited)" : "", fullnonce, count, Hs, W, values, hash_count_range, info->min_data_count); applog(LOG_DEBUG, "%"PRIpreprv" Re-estimate: Hs=%e W=%e read_count=%d%s fullnonce=%.3fs", icarus->proc_repr, Hs, W, read_count, limited ? 
" (limited)" : "", fullnonce); } info->history_count++; cgtime(&tv_history_finish); timersub(&tv_history_finish, &tv_history_start, &tv_history_finish); timeradd(&tv_history_finish, &(info->history_time), &(info->history_time)); } out: if (unlikely(state->identify)) handle_identify(thr, ret, was_first_run); return hash_count; } static struct api_data *icarus_drv_stats(struct cgpu_info *cgpu) { struct api_data *root = NULL; struct ICARUS_INFO *info = cgpu->device_data; // Warning, access to these is not locked - but we don't really // care since hashing performance is way more important than // locking access to displaying API debug 'stats' // If locking becomes an issue for any of them, use copy_data=true also root = api_add_int(root, "read_count", &(info->read_count), false); root = api_add_int(root, "read_count_limit", &(info->read_count_limit), false); root = api_add_double(root, "fullnonce", &(info->fullnonce), false); root = api_add_int(root, "count", &(info->count), false); root = api_add_hs(root, "Hs", &(info->Hs), false); root = api_add_double(root, "W", &(info->W), false); root = api_add_uint(root, "total_values", &(info->values), false); root = api_add_uint64(root, "range", &(info->hash_count_range), false); root = api_add_uint64(root, "history_count", &(info->history_count), false); root = api_add_timeval(root, "history_time", &(info->history_time), false); root = api_add_uint(root, "min_data_count", &(info->min_data_count), false); root = api_add_uint(root, "timing_values", &(info->history[0].values), false); root = api_add_const(root, "timing_mode", timing_mode_str(info->timing_mode), false); root = api_add_bool(root, "is_timing", &(info->do_icarus_timing), false); root = api_add_int(root, "baud", &(info->baud), false); root = api_add_int(root, "work_division", &(info->work_division), false); root = api_add_int(root, "fpga_count", &(info->fpga_count), false); return root; } static void icarus_shutdown(struct thr_info *thr) { do_icarus_close(thr); free(thr->cgpu_data); } struct device_drv icarus_drv = { .dname = "icarus", .name = "ICA", .probe_priority = -120, .lowl_probe = icarus_lowl_probe, .get_api_stats = icarus_drv_stats, .thread_prepare = icarus_prepare, .thread_init = icarus_init, .scanhash = icarus_scanhash, .thread_disable = close_device_fd, .thread_shutdown = icarus_shutdown, }; bfgminer-bfgminer-3.10.0/driver-klondike.c000066400000000000000000001327241226556647300204410ustar00rootroot00000000000000/* * Copyright 2013 Andrew Smith * Copyright 2013 Con Kolivas * Copyright 2013 Chris Savery * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #include #include #ifdef WIN32 #include #endif #include "compat.h" #include "deviceapi.h" #include "lowlevel.h" #include "lowl-usb.h" #include "miner.h" #define K1 "K1" #define K16 "K16" #define K64 "K64" static const char *msg_detect_send = "DSend"; static const char *msg_detect_reply = "DReply"; static const char *msg_send = "Send"; static const char *msg_reply = "Reply"; #define KLN_CMD_ABORT 'A' #define KLN_CMD_CONFIG 'C' #define KLN_CMD_ENABLE 'E' #define KLN_CMD_IDENT 'I' #define KLN_CMD_NONCE '=' #define KLN_CMD_STATUS 'S' #define KLN_CMD_WORK 'W' #define KLN_CMD_ENABLE_OFF '0' #define KLN_CMD_ENABLE_ON '1' #define MIDSTATE_BYTES 32 #define MERKLE_OFFSET 64 #define MERKLE_BYTES 12 #define REPLY_SIZE 15 // adequate for all types of replies #define MAX_KLINES 1024 // unhandled reply limit #define REPLY_WAIT_TIME 100 // poll interval for a cmd waiting it's reply #define CMD_REPLY_RETRIES 8 // how many retries for cmds #define MAX_WORK_COUNT 4 // for now, must be binary multiple and match firmware #define TACH_FACTOR 87890 // fan rpm divisor #define KLN_KILLWORK_TEMP 53.5 #define KLN_COOLED_DOWN 45.5 /* * Work older than 5s will already be completed * FYI it must not be possible to complete 256 work * items this quickly on a single device - * thus limited to 219.9GH/s per device */ #define OLD_WORK_MS ((int)(5 * 1000)) /* * How many incorrect slave counts to ignore in a row * 2 means it allows random grabage returned twice * Until slaves are implemented, this should never occur * so allowing 2 in a row should ignore random errros */ #define KLN_ISS_IGNORE 2 /* * If the queue status hasn't been updated for this long then do it now * 5GH/s = 859ms per full nonce range */ #define LATE_UPDATE_MS ((int)(2.5 * 1000)) // If 5 late updates in a row, try to reset the device #define LATE_UPDATE_LIMIT 5 // If the reset fails sleep for 1s #define LATE_UPDATE_SLEEP_MS 1000 // However give up after 8s #define LATE_UPDATE_NODEV_MS ((int)(8.0 * 1000)) BFG_REGISTER_DRIVER(klondike_drv) typedef struct klondike_header { uint8_t cmd; uint8_t dev; uint8_t buf[REPLY_SIZE-2]; } HEADER; #define K_2(_bytes) ((int)(_bytes[0]) + \ ((int)(_bytes[1]) << 8)) #define K_4(_bytes) ((uint64_t)(_bytes[0]) + \ ((uint64_t)(_bytes[1]) << 8) + \ ((uint64_t)(_bytes[2]) << 16) + \ ((uint64_t)(_bytes[3]) << 24)) #define K_SERIAL(_serial) K_4(_serial) #define K_HASHCOUNT(_hashcount) K_2(_hashcount) #define K_MAXCOUNT(_maxcount) K_2(_maxcount) #define K_NONCE(_nonce) K_4(_nonce) #define K_HASHCLOCK(_hashclock) K_2(_hashclock) #define SET_HASHCLOCK(_hashclock, _value) do { \ (_hashclock)[0] = (uint8_t)((_value) & 0xff); \ (_hashclock)[1] = (uint8_t)(((_value) >> 8) & 0xff); \ } while(0) #define KSENDHD(_add) (sizeof(uint8_t) + sizeof(uint8_t) + _add) typedef struct klondike_id { uint8_t cmd; uint8_t dev; uint8_t version; uint8_t product[7]; uint8_t serial[4]; } IDENTITY; typedef struct klondike_status { uint8_t cmd; uint8_t dev; uint8_t state; uint8_t chipcount; uint8_t slavecount; uint8_t workqc; uint8_t workid; uint8_t temp; uint8_t fanspeed; uint8_t errorcount; uint8_t hashcount[2]; uint8_t maxcount[2]; uint8_t noise; } WORKSTATUS; typedef struct _worktask { uint8_t cmd; uint8_t dev; uint8_t workid; uint8_t midstate[32]; uint8_t merkle[12]; } WORKTASK; typedef struct _workresult { uint8_t cmd; uint8_t dev; uint8_t workid; uint8_t nonce[4]; } WORKRESULT; typedef struct klondike_cfg { uint8_t cmd; uint8_t dev; uint8_t hashclock[2]; 
uint8_t temptarget; uint8_t tempcritical; uint8_t fantarget; uint8_t pad2; } WORKCFG; typedef struct kline { union { HEADER hd; IDENTITY id; WORKSTATUS ws; WORKTASK wt; WORKRESULT wr; WORKCFG cfg; }; } KLINE; #define zero_kline(_kline) memset((void *)(_kline), 0, sizeof(KLINE)); typedef struct device_info { uint32_t noncecount; uint32_t nextworkid; uint16_t lasthashcount; uint64_t totalhashcount; uint32_t rangesize; uint32_t *chipstats; } DEVINFO; typedef struct klist { struct klist *prev; struct klist *next; KLINE kline; struct timeval tv_when; int block_seq; bool ready; bool working; } KLIST; typedef struct jobque { int workqc; struct timeval last_update; bool overheat; bool flushed; int late_update_count; int late_update_sequential; } JOBQUE; struct klondike_info { pthread_rwlock_t stat_lock; struct thr_info replies_thr; cglock_t klist_lock; KLIST *used; KLIST *free; int kline_count; int used_count; int block_seq; KLIST *status; DEVINFO *devinfo; KLIST *cfg; JOBQUE *jobque; int noncecount; uint64_t hashcount; uint64_t errorcount; uint64_t noisecount; int incorrect_slave_sequential; int16_t nonce_offset; // us Delay from USB reply to being processed double delay_count; double delay_total; double delay_min; double delay_max; struct timeval tv_last_nonce_received; // Time from recieving one nonce to the next double nonce_count; double nonce_total; double nonce_min; double nonce_max; int wque_size; int wque_cleared; bool initialised; struct libusb_device_handle *usbdev_handle; // TODO: bool usbinfo_nodev; }; static KLIST *new_klist_set(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *klist = NULL; int i; klist = calloc(MAX_KLINES, sizeof(*klist)); if (!klist) quit(1, "Failed to calloc klist - when old count=%d", klninfo->kline_count); klninfo->kline_count += MAX_KLINES; klist[0].prev = NULL; klist[0].next = &(klist[1]); for (i = 1; i < MAX_KLINES-1; i++) { klist[i].prev = &klist[i-1]; klist[i].next = &klist[i+1]; } klist[MAX_KLINES-1].prev = &(klist[MAX_KLINES-2]); klist[MAX_KLINES-1].next = NULL; return klist; } static KLIST *allocate_kitem(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem = NULL; int ran_out = 0; char errbuf[1024]; cg_wlock(&klninfo->klist_lock); if (klninfo->free == NULL) { ran_out = klninfo->kline_count; klninfo->free = new_klist_set(klncgpu); snprintf(errbuf, sizeof(errbuf), "%s%i: KLINE count exceeded %d, now %d", klncgpu->drv->name, klncgpu->device_id, ran_out, klninfo->kline_count); } kitem = klninfo->free; klninfo->free = klninfo->free->next; if (klninfo->free) klninfo->free->prev = NULL; kitem->next = klninfo->used; kitem->prev = NULL; if (kitem->next) kitem->next->prev = kitem; klninfo->used = kitem; kitem->ready = false; kitem->working = false; memset((void *)&(kitem->kline), 0, sizeof(kitem->kline)); klninfo->used_count++; cg_wunlock(&klninfo->klist_lock); if (ran_out > 0) applog(LOG_WARNING, "%s", errbuf); return kitem; } static KLIST *release_kitem(struct cgpu_info *klncgpu, KLIST *kitem) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); cg_wlock(&klninfo->klist_lock); if (kitem == klninfo->used) klninfo->used = kitem->next; if (kitem->next) kitem->next->prev = kitem->prev; if (kitem->prev) kitem->prev->next = kitem->next; kitem->next = klninfo->free; if (klninfo->free) klninfo->free->prev = kitem; kitem->prev = NULL; klninfo->free = kitem; klninfo->used_count--; 
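	/* kitem is now back at the head of the free list.  The used/free
	 * lists are simple doubly-linked stacks carved out of the blocks
	 * allocated by new_klist_set(), so nothing is ever freed here; items
	 * only migrate between the two lists while klist_lock is held.  The
	 * intended usage pattern (illustrative only) is:
	 *
	 *   KLIST *kitem = allocate_kitem(klncgpu);  // moves free -> used
	 *   // ... fill kitem->kline, let GetReply() consume it ...
	 *   kitem = release_kitem(klncgpu, kitem);   // moves used -> free and
	 *                                            // returns NULL so the
	 *                                            // caller's pointer is
	 *                                            // cleared in one step
	 */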
cg_wunlock(&klninfo->klist_lock); return NULL; } static int usb_init(struct cgpu_info * const klncgpu, struct libusb_device * const dev) { struct klondike_info * const klninfo = klncgpu->device_data; int e; if (libusb_open(dev, &klninfo->usbdev_handle) != LIBUSB_SUCCESS) return 0; if (LIBUSB_SUCCESS != (e = libusb_set_configuration(klninfo->usbdev_handle, 1))) { applog(LOG_DEBUG, "%s: Failed to set configuration 1: %s", klondike_drv.dname, bfg_strerror(e, BST_LIBUSB)); fail: libusb_close(klninfo->usbdev_handle); return 0; } if (LIBUSB_SUCCESS != (e = libusb_claim_interface(klninfo->usbdev_handle, 0))) { applog(LOG_DEBUG, "%s: Failed to claim interface 0: %s", klondike_drv.dname, bfg_strerror(e, BST_LIBUSB)); goto fail; } return 1; } static int _usb_rw(struct cgpu_info * const klncgpu, void * const buf, const size_t bufsiz, int * const processed, int ep) { struct klondike_info * const klninfo = klncgpu->device_data; const unsigned int timeout = 999; unsigned char *cbuf = buf; int err, sent; *processed = 0; while (*processed < bufsiz) { err = libusb_bulk_transfer(klninfo->usbdev_handle, ep, cbuf, bufsiz, &sent, timeout); if (unlikely(err)) return err; *processed += sent; } return LIBUSB_SUCCESS; } #define usb_read( klncgpu, buf, bufsiz, processed) _usb_rw(klncgpu, buf, bufsiz, processed, 1 | LIBUSB_ENDPOINT_IN) #define usb_write(klncgpu, buf, bufsiz, processed) _usb_rw(klncgpu, buf, bufsiz, processed, 1 | LIBUSB_ENDPOINT_OUT) static void usb_nodev(__maybe_unused struct cgpu_info * const klncgpu) { // TODO } static void usb_uninit(struct cgpu_info * const klncgpu) { struct klondike_info * const klninfo = klncgpu->device_data; libusb_release_interface(klninfo->usbdev_handle, 0); libusb_close(klninfo->usbdev_handle); } static double cvtKlnToC(uint8_t temp) { double Rt, stein, celsius; if (temp == 0) return 0.0; Rt = 1000.0 * 255.0 / (double)temp - 1000.0; stein = log(Rt / 2200.0) / 3987.0; stein += 1.0 / (double)(25.0 + 273.15); celsius = (1.0 / stein) - 273.15; // For display of bad data if (celsius < 0.0) celsius = 0.0; if (celsius > 200.0) celsius = 200.0; return celsius; } static int cvtCToKln(double deg) { double Rt, stein, temp; if (deg < 0.0) deg = 0.0; stein = 1.0 / (deg + 273.15); stein -= 1.0 / (double)(25.0 + 273.15); Rt = exp(stein * 3987.0) * 2200.0; if (Rt == -1000.0) Rt++; temp = 1000.0 * 256.0 / (Rt + 1000.0); if (temp > 255) temp = 255; if (temp < 0) temp = 0; return (int)temp; } // Change this to LOG_WARNING if you wish to always see the replies #define READ_DEBUG LOG_DEBUG static void display_kline(struct cgpu_info *klncgpu, KLINE *kline, const char *msg) { const struct klondike_info * const klninfo = klncgpu->device_data; switch (kline->hd.cmd) { case KLN_CMD_NONCE: applog(READ_DEBUG, "%s%i:%d %s work [%c] dev=%d workid=%d" " nonce=0x%08x", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), msg, kline->wr.cmd, (int)(kline->wr.dev), (int)(kline->wr.workid), (unsigned int)K_NONCE(kline->wr.nonce) + klninfo->nonce_offset); break; case KLN_CMD_STATUS: case KLN_CMD_WORK: case KLN_CMD_ENABLE: case KLN_CMD_ABORT: applog(READ_DEBUG, "%s%i:%d %s status [%c] dev=%d chips=%d" " slaves=%d workcq=%d workid=%d temp=%d fan=%d" " errors=%d hashes=%d max=%d noise=%d", klncgpu->drv->name, klncgpu->device_id, (int)(kline->ws.dev), msg, kline->ws.cmd, (int)(kline->ws.dev), (int)(kline->ws.chipcount), (int)(kline->ws.slavecount), (int)(kline->ws.workqc), (int)(kline->ws.workid), (int)(kline->ws.temp), (int)(kline->ws.fanspeed), (int)(kline->ws.errorcount), 
K_HASHCOUNT(kline->ws.hashcount), K_MAXCOUNT(kline->ws.maxcount), (int)(kline->ws.noise)); break; case KLN_CMD_CONFIG: applog(READ_DEBUG, "%s%i:%d %s config [%c] dev=%d clock=%d" " temptarget=%d tempcrit=%d fan=%d", klncgpu->drv->name, klncgpu->device_id, (int)(kline->cfg.dev), msg, kline->cfg.cmd, (int)(kline->cfg.dev), K_HASHCLOCK(kline->cfg.hashclock), (int)(kline->cfg.temptarget), (int)(kline->cfg.tempcritical), (int)(kline->cfg.fantarget)); break; case KLN_CMD_IDENT: applog(READ_DEBUG, "%s%i:%d %s info [%c] version=0x%02x prod=%.7s" " serial=0x%08x", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), msg, kline->hd.cmd, (int)(kline->id.version), kline->id.product, (unsigned int)K_SERIAL(kline->id.serial)); break; default: { char hexdata[REPLY_SIZE * 2]; bin2hex(hexdata, &kline->hd.dev, REPLY_SIZE - 1); applog(LOG_ERR, "%s%i:%d %s [%c:%s] unknown and ignored", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), msg, kline->hd.cmd, hexdata); break; } } } static void display_send_kline(struct cgpu_info *klncgpu, KLINE *kline, const char *msg) { switch (kline->hd.cmd) { case KLN_CMD_WORK: applog(READ_DEBUG, "%s%i:%d %s work [%c] dev=%d workid=0x%02x ...", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wt.dev), msg, kline->ws.cmd, (int)(kline->wt.dev), (int)(kline->wt.workid)); break; case KLN_CMD_CONFIG: applog(READ_DEBUG, "%s%i:%d %s config [%c] dev=%d clock=%d" " temptarget=%d tempcrit=%d fan=%d", klncgpu->drv->name, klncgpu->device_id, (int)(kline->cfg.dev), msg, kline->cfg.cmd, (int)(kline->cfg.dev), K_HASHCLOCK(kline->cfg.hashclock), (int)(kline->cfg.temptarget), (int)(kline->cfg.tempcritical), (int)(kline->cfg.fantarget)); break; case KLN_CMD_IDENT: case KLN_CMD_STATUS: case KLN_CMD_ABORT: applog(READ_DEBUG, "%s%i:%d %s cmd [%c]", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), msg, kline->hd.cmd); break; case KLN_CMD_ENABLE: applog(READ_DEBUG, "%s%i:%d %s enable [%c] enable=%c", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), msg, kline->hd.cmd, (char)(kline->hd.buf[0])); break; case KLN_CMD_NONCE: default: { char hexdata[REPLY_SIZE * 2]; bin2hex(hexdata, (unsigned char *)&(kline->hd.dev), REPLY_SIZE - 1); applog(LOG_ERR, "%s%i:%d %s [%c:%s] unknown/unexpected and ignored", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), msg, kline->hd.cmd, hexdata); break; } } } static bool SendCmd(struct cgpu_info *klncgpu, KLINE *kline, int datalen) { struct klondike_info * const klninfo = klncgpu->device_data; int err, amt, writ; if (klninfo->usbinfo_nodev) return false; display_send_kline(klncgpu, kline, msg_send); writ = KSENDHD(datalen); err = usb_write(klncgpu, kline, writ, &amt); if (err < 0 || amt != writ) { applog(LOG_ERR, "%s%i:%d Cmd:%c Dev:%d, write failed (%d:%d:%d)", klncgpu->drv->name, klncgpu->device_id, (int)(kline->hd.dev), kline->hd.cmd, (int)(kline->hd.dev), writ, amt, err); return false; } return true; } static KLIST *GetReply(struct cgpu_info *klncgpu, uint8_t cmd, uint8_t dev) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem; int retries = CMD_REPLY_RETRIES; while (retries-- > 0 && klncgpu->shutdown == false) { cgsleep_ms(REPLY_WAIT_TIME); cg_rlock(&klninfo->klist_lock); kitem = klninfo->used; while (kitem) { if (kitem->kline.hd.cmd == cmd && kitem->kline.hd.dev == dev && kitem->ready == true && kitem->working == false) { kitem->working = true; cg_runlock(&klninfo->klist_lock); return kitem; } kitem = kitem->next; } cg_runlock(&klninfo->klist_lock); } return 
NULL; } static KLIST *SendCmdGetReply(struct cgpu_info *klncgpu, KLINE *kline, int datalen) { if (!SendCmd(klncgpu, kline, datalen)) return NULL; return GetReply(klncgpu, kline->hd.cmd, kline->hd.dev); } static bool klondike_get_stats(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem; KLINE kline; int slaves, dev; uint8_t temp = 0xFF; if (klninfo->usbinfo_nodev || klninfo->status == NULL) return false; applog(LOG_DEBUG, "%s%i: getting status", klncgpu->drv->name, klncgpu->device_id); rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; rd_unlock(&(klninfo->stat_lock)); // loop thru devices and get status for each for (dev = 0; dev <= slaves; dev++) { zero_kline(&kline); kline.hd.cmd = KLN_CMD_STATUS; kline.hd.dev = dev; kitem = SendCmdGetReply(klncgpu, &kline, 0); if (kitem != NULL) { wr_lock(&(klninfo->stat_lock)); memcpy((void *)(&(klninfo->status[dev])), (void *)kitem, sizeof(klninfo->status[dev])); wr_unlock(&(klninfo->stat_lock)); kitem = release_kitem(klncgpu, kitem); } else { applog(LOG_ERR, "%s%i:%d failed to update stats", klncgpu->drv->name, klncgpu->device_id, dev); } if (klninfo->status[dev].kline.ws.temp < temp) temp = klninfo->status[dev].kline.ws.temp; } klncgpu->temp = cvtKlnToC(temp); return true; } // TODO: this only enables the master (no slaves) static bool kln_enable(struct cgpu_info *klncgpu) { KLIST *kitem; KLINE kline; int tries = 2; bool ok = false; zero_kline(&kline); kline.hd.cmd = KLN_CMD_ENABLE; kline.hd.dev = 0; kline.hd.buf[0] = KLN_CMD_ENABLE_ON; while (tries-- > 0) { kitem = SendCmdGetReply(klncgpu, &kline, 1); if (kitem) { kitem = release_kitem(klncgpu, kitem); ok = true; break; } cgsleep_ms(50); } if (ok) cgsleep_ms(50); return ok; } static void kln_disable(struct cgpu_info *klncgpu, int dev, bool all) { KLINE kline; int i; zero_kline(&kline); kline.hd.cmd = KLN_CMD_ENABLE; kline.hd.buf[0] = KLN_CMD_ENABLE_OFF; for (i = (all ? 
0 : dev); i <= dev; i++) { kline.hd.dev = i; SendCmd(klncgpu, &kline, KSENDHD(1)); } } static bool klondike_init(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem; KLINE kline; int slaves, dev; klninfo->initialised = false; zero_kline(&kline); kline.hd.cmd = KLN_CMD_STATUS; kline.hd.dev = 0; kitem = SendCmdGetReply(klncgpu, &kline, 0); if (kitem == NULL) return false; slaves = kitem->kline.ws.slavecount; if (klninfo->status == NULL) { applog(LOG_DEBUG, "%s%i: initializing data", klncgpu->drv->name, klncgpu->device_id); // alloc space for status, devinfo, cfg and jobque for master and slaves klninfo->status = calloc(slaves+1, sizeof(*(klninfo->status))); if (unlikely(!klninfo->status)) quit(1, "Failed to calloc status array in klondke_get_stats"); klninfo->devinfo = calloc(slaves+1, sizeof(*(klninfo->devinfo))); if (unlikely(!klninfo->devinfo)) quit(1, "Failed to calloc devinfo array in klondke_get_stats"); klninfo->cfg = calloc(slaves+1, sizeof(*(klninfo->cfg))); if (unlikely(!klninfo->cfg)) quit(1, "Failed to calloc cfg array in klondke_get_stats"); klninfo->jobque = calloc(slaves+1, sizeof(*(klninfo->jobque))); if (unlikely(!klninfo->jobque)) quit(1, "Failed to calloc jobque array in klondke_get_stats"); } memcpy((void *)(&(klninfo->status[0])), (void *)kitem, sizeof(klninfo->status[0])); kitem = release_kitem(klncgpu, kitem); // zero init triggers read back only zero_kline(&kline); kline.cfg.cmd = KLN_CMD_CONFIG; int size = 2; // boundaries are checked by device, with valid values returned if (opt_klondike_options != NULL) { int hashclock; double temptarget; sscanf(opt_klondike_options, "%d:%lf", &hashclock, &temptarget); SET_HASHCLOCK(kline.cfg.hashclock, hashclock); kline.cfg.temptarget = cvtCToKln(temptarget); kline.cfg.tempcritical = 0; // hard code for old firmware kline.cfg.fantarget = 0xff; // hard code for old firmware size = sizeof(kline.cfg) - 2; } for (dev = 0; dev <= slaves; dev++) { kline.cfg.dev = dev; kitem = SendCmdGetReply(klncgpu, &kline, size); if (kitem != NULL) { memcpy((void *)&(klninfo->cfg[dev]), kitem, sizeof(klninfo->cfg[dev])); applog(LOG_WARNING, "%s%i:%d config (%d: Clk: %d, T:%.0lf, C:%.0lf, F:%d)", klncgpu->drv->name, klncgpu->device_id, dev, dev, K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock), cvtKlnToC(klninfo->cfg[dev].kline.cfg.temptarget), cvtKlnToC(klninfo->cfg[dev].kline.cfg.tempcritical), (int)100*klninfo->cfg[dev].kline.cfg.fantarget/256); kitem = release_kitem(klncgpu, kitem); } } klondike_get_stats(klncgpu); klninfo->initialised = true; for (dev = 0; dev <= slaves; dev++) { klninfo->devinfo[dev].rangesize = ((uint64_t)1<<32) / klninfo->status[dev].kline.ws.chipcount; klninfo->devinfo[dev].chipstats = calloc(klninfo->status[dev].kline.ws.chipcount*2 , sizeof(uint32_t)); } bool ok = kln_enable(klncgpu); if (!ok) applog(LOG_ERR, "%s%i: failed to enable", klncgpu->drv->name, klncgpu->device_id); return ok; } static void control_init(struct cgpu_info *klncgpu) { struct klondike_info * const klninfo = klncgpu->device_data; int err, interface; if (klninfo->usbinfo_nodev) return; interface = 0; err = libusb_control_transfer(klninfo->usbdev_handle, 0, 9, 1, interface, NULL, 0, 999); applog(LOG_DEBUG, "%s%i: reset got err %d", klncgpu->drv->name, klncgpu->device_id, err); } static bool klondike_lowl_match(const struct lowlevel_device_info * const info) { if (!lowlevel_match_id(info, &lowl_usb, 0x04d8, 0xf60a)) return false; return (info->manufacturer && strstr(info->manufacturer, 
"Klondike")); } static bool klondike_lowl_probe(const struct lowlevel_device_info * const info) { if (unlikely(info->lowl != &lowl_usb)) { applog(LOG_DEBUG, "%s: Matched \"%s\" serial \"%s\", but lowlevel driver is not usb!", __func__, info->product, info->serial); return false; } struct libusb_device * const dev = info->lowl_data; if (bfg_claim_libusb(&klondike_drv, true, dev)) return false; // static bool klondike_detect_one(struct libusb_device *dev, struct usb_find_devices *found) struct cgpu_info * const klncgpu = malloc(sizeof(*klncgpu)); struct klondike_info *klninfo = NULL; KLINE kline; if (unlikely(!klncgpu)) quit(1, "Failed to calloc klncgpu in klondike_detect_one"); *klncgpu = (struct cgpu_info){ .drv = &klondike_drv, .deven = DEV_ENABLED, .threads = 1, .cutofftemp = (int)KLN_KILLWORK_TEMP, }; klninfo = calloc(1, sizeof(*klninfo)); if (unlikely(!klninfo)) quit(1, "Failed to calloc klninfo in klondke_detect_one"); klncgpu->device_data = (void *)klninfo; klninfo->free = new_klist_set(klncgpu); if (usb_init(klncgpu, dev)) { int sent, recd, err; KLIST kitem; int attempts = 0; klncgpu->device_path = strdup(info->devid); control_init(klncgpu); while (attempts++ < 3) { kline.hd.cmd = KLN_CMD_IDENT; kline.hd.dev = 0; display_send_kline(klncgpu, &kline, msg_detect_send); err = usb_write(klncgpu, (char *)&(kline.hd), 2, &sent); if (err < 0 || sent != 2) { applog(LOG_ERR, "%s (%s) detect write failed (%d:%d)", klncgpu->drv->dname, klncgpu->device_path, sent, err); } cgsleep_ms(REPLY_WAIT_TIME*10); err = usb_read(klncgpu, &kitem.kline, REPLY_SIZE, &recd); if (err < 0) { applog(LOG_ERR, "%s (%s) detect read failed (%d:%d)", klncgpu->drv->dname, klncgpu->device_path, recd, err); } else if (recd < 1) { applog(LOG_ERR, "%s (%s) detect empty reply (%d)", klncgpu->drv->dname, klncgpu->device_path, recd); } else if (kitem.kline.hd.cmd == KLN_CMD_IDENT && kitem.kline.hd.dev == 0) { display_kline(klncgpu, &kitem.kline, msg_detect_reply); applog(LOG_DEBUG, "%s (%s) detect successful (%d attempt%s)", klncgpu->drv->dname, klncgpu->device_path, attempts, attempts == 1 ? 
"" : "s"); if (!add_cgpu(klncgpu)) break; applog(LOG_DEBUG, "Klondike cgpu added"); rwlock_init(&klninfo->stat_lock); cglock_init(&klninfo->klist_lock); return true; } } usb_uninit(klncgpu); } free(klninfo->free); free(klninfo); free(klncgpu); return false; } static void klondike_check_nonce(struct cgpu_info *klncgpu, KLIST *kitem) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); struct work *work, *look, *tmp; KLINE *kline = &(kitem->kline); struct timeval tv_now; double us_diff; uint32_t nonce = K_NONCE(kline->wr.nonce) + klninfo->nonce_offset; applog(LOG_DEBUG, "%s%i:%d FOUND NONCE (%02x:%08x)", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), kline->wr.workid, (unsigned int)nonce); work = NULL; cgtime(&tv_now); rd_lock(&(klncgpu->qlock)); HASH_ITER(hh, klncgpu->queued_work, look, tmp) { if (ms_tdiff(&tv_now, &(look->tv_stamp)) < OLD_WORK_MS && (look->subid == (kline->wr.dev*256 + kline->wr.workid))) { work = look; break; } } rd_unlock(&(klncgpu->qlock)); if (work) { if (unlikely(!klninfo->nonce_offset)) { bool test_c0 = test_nonce(work, nonce - 0xc0, false); bool test_180 = test_nonce(work, nonce - 0x180, false); if (test_c0) { if (unlikely(test_180)) { applog(LOG_DEBUG, "%s: Matched both c0 and 180 offsets (%02x:%08lx)", klncgpu->dev_repr, kline->wr.workid, (unsigned long)nonce); submit_nonce(klncgpu->thr[0], work, nonce - 0x180); nonce -= 0xc0; } else { applog(LOG_DEBUG, "%s: Matched c0 offset (%02x:%08lx)", klncgpu->dev_repr, kline->wr.workid, (unsigned long)nonce); nonce += (klninfo->nonce_offset = -0xc0); } } else if (test_180) { applog(LOG_DEBUG, "%s: Matched 180 offset (%02x:%08lx)", klncgpu->dev_repr, kline->wr.workid, (unsigned long)nonce); nonce += (klninfo->nonce_offset = -0x180); } else applog(LOG_DEBUG, "%s: Matched neither c0 nor 180 offset (%02x:%08lx)", klncgpu->dev_repr, kline->wr.workid, (unsigned long)nonce); } wr_lock(&(klninfo->stat_lock)); klninfo->devinfo[kline->wr.dev].noncecount++; klninfo->noncecount++; wr_unlock(&(klninfo->stat_lock)); applog(LOG_DEBUG, "%s%i:%d SUBMIT NONCE (%02x:%08x)", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), kline->wr.workid, (unsigned int)nonce); cgtime(&tv_now); bool ok = submit_nonce(klncgpu->thr[0], work, nonce); applog(LOG_DEBUG, "%s%i:%d chip stats %d, %08x, %d, %d", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), kline->wr.dev, (unsigned int)nonce, klninfo->devinfo[kline->wr.dev].rangesize, klninfo->status[kline->wr.dev].kline.ws.chipcount); klninfo->devinfo[kline->wr.dev].chipstats[(nonce / klninfo->devinfo[kline->wr.dev].rangesize) + (ok ? 
0 : klninfo->status[kline->wr.dev].kline.ws.chipcount)]++; us_diff = us_tdiff(&tv_now, &(kitem->tv_when)); if (klninfo->delay_count == 0) { klninfo->delay_min = us_diff; klninfo->delay_max = us_diff; } else { if (klninfo->delay_min > us_diff) klninfo->delay_min = us_diff; if (klninfo->delay_max < us_diff) klninfo->delay_max = us_diff; } klninfo->delay_count++; klninfo->delay_total += us_diff; if (klninfo->nonce_count > 0) { us_diff = us_tdiff(&(kitem->tv_when), &(klninfo->tv_last_nonce_received)); if (klninfo->nonce_count == 1) { klninfo->nonce_min = us_diff; klninfo->nonce_max = us_diff; } else { if (klninfo->nonce_min > us_diff) klninfo->nonce_min = us_diff; if (klninfo->nonce_max < us_diff) klninfo->nonce_max = us_diff; } klninfo->nonce_total += us_diff; } klninfo->nonce_count++; memcpy(&(klninfo->tv_last_nonce_received), &(kitem->tv_when), sizeof(klninfo->tv_last_nonce_received)); return; } applog(LOG_ERR, "%s%i:%d unknown work (%02x:%08x) - ignored", klncgpu->drv->name, klncgpu->device_id, (int)(kline->wr.dev), kline->wr.workid, (unsigned int)nonce); //inc_hw_errors(klncgpu->thr[0]); } // thread to keep looking for replies static void *klondike_get_replies(void *userdata) { struct cgpu_info *klncgpu = (struct cgpu_info *)userdata; struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem = NULL; int err, recd, slaves, dev, isc; bool overheat, sent; applog(LOG_DEBUG, "%s%i: listening for replies", klncgpu->drv->name, klncgpu->device_id); while (klncgpu->shutdown == false) { if (klninfo->usbinfo_nodev) return NULL; if (kitem == NULL) kitem = allocate_kitem(klncgpu); else memset((void *)&(kitem->kline), 0, sizeof(kitem->kline)); err = usb_read(klncgpu, &kitem->kline, REPLY_SIZE, &recd); if (err || recd != REPLY_SIZE) { if (err != -7) applog(LOG_ERR, "%s%i: reply err=%d amt=%d", klncgpu->drv->name, klncgpu->device_id, err, recd); } if (!err && recd == REPLY_SIZE) { cgtime(&(kitem->tv_when)); rd_lock(&(klninfo->stat_lock)); kitem->block_seq = klninfo->block_seq; rd_unlock(&(klninfo->stat_lock)); if (opt_log_level <= READ_DEBUG) { char hexdata[recd * 2]; bin2hex(hexdata, &kitem->kline.hd.dev, recd-1); applog(READ_DEBUG, "%s%i:%d reply [%c:%s]", klncgpu->drv->name, klncgpu->device_id, (int)(kitem->kline.hd.dev), kitem->kline.hd.cmd, hexdata); } // We can't check this until it's initialised if (klninfo->initialised) { rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; rd_unlock(&(klninfo->stat_lock)); if (kitem->kline.hd.dev > slaves) { applog(LOG_ERR, "%s%i: reply [%c] has invalid dev=%d (max=%d) using 0", klncgpu->drv->name, klncgpu->device_id, (char)(kitem->kline.hd.cmd), (int)(kitem->kline.hd.dev), slaves); /* TODO: this is rather problematic if there are slaves * however without slaves - it should always be zero */ kitem->kline.hd.dev = 0; } else { wr_lock(&(klninfo->stat_lock)); klninfo->jobque[kitem->kline.hd.dev].late_update_sequential = 0; wr_unlock(&(klninfo->stat_lock)); } } switch (kitem->kline.hd.cmd) { case KLN_CMD_NONCE: klondike_check_nonce(klncgpu, kitem); display_kline(klncgpu, &kitem->kline, msg_reply); break; case KLN_CMD_WORK: // We can't do/check this until it's initialised if (klninfo->initialised) { dev = kitem->kline.ws.dev; if (kitem->kline.ws.workqc == 0) { bool idle = false; rd_lock(&(klninfo->stat_lock)); if (klninfo->jobque[dev].flushed == false) idle = true; slaves = klninfo->status[0].kline.ws.slavecount; rd_unlock(&(klninfo->stat_lock)); if (idle) applog(LOG_WARNING, "%s%i:%d went idle before 
work was sent", klncgpu->drv->name, klncgpu->device_id, dev); } wr_lock(&(klninfo->stat_lock)); klninfo->jobque[dev].flushed = false; wr_unlock(&(klninfo->stat_lock)); } case KLN_CMD_STATUS: case KLN_CMD_ABORT: // We can't do/check this until it's initialised if (klninfo->initialised) { isc = 0; dev = kitem->kline.ws.dev; wr_lock(&(klninfo->stat_lock)); klninfo->jobque[dev].workqc = (int)(kitem->kline.ws.workqc); cgtime(&(klninfo->jobque[dev].last_update)); slaves = klninfo->status[0].kline.ws.slavecount; overheat = klninfo->jobque[dev].overheat; if (dev == 0) { if (kitem->kline.ws.slavecount != slaves) isc = ++klninfo->incorrect_slave_sequential; else isc = klninfo->incorrect_slave_sequential = 0; } wr_unlock(&(klninfo->stat_lock)); if (isc) { applog(LOG_ERR, "%s%i:%d reply [%c] has a diff" " # of slaves=%d (curr=%d)%s", klncgpu->drv->name, klncgpu->device_id, dev, (char)(kitem->kline.ws.cmd), (int)(kitem->kline.ws.slavecount), slaves, isc <= KLN_ISS_IGNORE ? "" : " disabling device"); if (isc > KLN_ISS_IGNORE) usb_nodev(klncgpu); break; } if (!overheat) { double temp = cvtKlnToC(kitem->kline.ws.temp); if (temp >= KLN_KILLWORK_TEMP) { KLINE kline; wr_lock(&(klninfo->stat_lock)); klninfo->jobque[dev].overheat = true; wr_unlock(&(klninfo->stat_lock)); applog(LOG_WARNING, "%s%i:%d Critical overheat (%.0fC)", klncgpu->drv->name, klncgpu->device_id, dev, temp); zero_kline(&kline); kline.hd.cmd = KLN_CMD_ABORT; kline.hd.dev = dev; sent = SendCmd(klncgpu, &kline, KSENDHD(0)); kln_disable(klncgpu, dev, false); if (!sent) { applog(LOG_ERR, "%s%i:%d overheat failed to" " abort work - disabling device", klncgpu->drv->name, klncgpu->device_id, dev); usb_nodev(klncgpu); } } } } case KLN_CMD_ENABLE: wr_lock(&(klninfo->stat_lock)); klninfo->errorcount += kitem->kline.ws.errorcount; klninfo->noisecount += kitem->kline.ws.noise; wr_unlock(&(klninfo->stat_lock)); display_kline(klncgpu, &kitem->kline, msg_reply); kitem->ready = true; kitem = NULL; break; case KLN_CMD_CONFIG: display_kline(klncgpu, &kitem->kline, msg_reply); kitem->ready = true; kitem = NULL; break; case KLN_CMD_IDENT: display_kline(klncgpu, &kitem->kline, msg_reply); kitem->ready = true; kitem = NULL; break; default: display_kline(klncgpu, &kitem->kline, msg_reply); break; } } } return NULL; } static void klondike_flush_work(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); KLIST *kitem; KLINE kline; int slaves, dev; if (klninfo->initialised) { wr_lock(&(klninfo->stat_lock)); klninfo->block_seq++; slaves = klninfo->status[0].kline.ws.slavecount; wr_unlock(&(klninfo->stat_lock)); applog(LOG_DEBUG, "%s%i: flushing work", klncgpu->drv->name, klncgpu->device_id); zero_kline(&kline); kline.hd.cmd = KLN_CMD_ABORT; for (dev = 0; dev <= slaves; dev++) { kline.hd.dev = dev; kitem = SendCmdGetReply(klncgpu, &kline, KSENDHD(0)); if (kitem != NULL) { wr_lock(&(klninfo->stat_lock)); memcpy((void *)&(klninfo->status[dev]), kitem, sizeof(klninfo->status[dev])); klninfo->jobque[dev].flushed = true; wr_unlock(&(klninfo->stat_lock)); kitem = release_kitem(klncgpu, kitem); } } } } static bool klondike_thread_prepare(struct thr_info *thr) { struct cgpu_info *klncgpu = thr->cgpu; struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); if (thr_info_create(&(klninfo->replies_thr), NULL, klondike_get_replies, (void *)klncgpu)) { applog(LOG_ERR, "%s%i: thread create failed", klncgpu->drv->name, klncgpu->device_id); return false; } pthread_detach(klninfo->replies_thr.pth); // let the 
listening get started cgsleep_ms(100); return klondike_init(klncgpu); } static bool klondike_thread_init(struct thr_info *thr) { struct cgpu_info *klncgpu = thr->cgpu; struct klondike_info * const klninfo = klncgpu->device_data; notifier_init(thr->work_restart_notifier); if (klninfo->usbinfo_nodev) return false; klondike_flush_work(klncgpu); return true; } static void klondike_shutdown(struct thr_info *thr) { struct cgpu_info *klncgpu = thr->cgpu; struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); applog(LOG_DEBUG, "%s%i: shutting down work", klncgpu->drv->name, klncgpu->device_id); kln_disable(klncgpu, klninfo->status[0].kline.ws.slavecount, true); klncgpu->shutdown = true; } static void klondike_thread_enable(struct thr_info *thr) { struct cgpu_info *klncgpu = thr->cgpu; struct klondike_info * const klninfo = klncgpu->device_data; if (klninfo->usbinfo_nodev) return; /* KLINE kline; zero_kline(&kline); kline.hd.cmd = KLN_CMD_ENABLE; kline.hd.dev = dev; kline.hd.buf[0] = KLN_CMD_ENABLE_OFF; kitem = SendCmdGetReply(klncgpu, &kline, KSENDHD(1)); */ } static bool klondike_send_work(struct cgpu_info *klncgpu, int dev, struct work *work) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); struct work *look, *tmp; KLINE kline; struct timeval tv_old; int wque_size, wque_cleared; if (klninfo->usbinfo_nodev) return false; zero_kline(&kline); kline.wt.cmd = KLN_CMD_WORK; kline.wt.dev = dev; memcpy(kline.wt.midstate, work->midstate, MIDSTATE_BYTES); memcpy(kline.wt.merkle, work->data + MERKLE_OFFSET, MERKLE_BYTES); kline.wt.workid = (uint8_t)(klninfo->devinfo[dev].nextworkid++ & 0xFF); work->subid = dev*256 + kline.wt.workid; cgtime(&work->tv_stamp); if (opt_log_level <= LOG_DEBUG) { char hexdata[(sizeof(kline.wt) * 2) + 1]; bin2hex(hexdata, &kline.wt, sizeof(kline.wt)); applog(LOG_DEBUG, "WORKDATA: %s", hexdata); } applog(LOG_DEBUG, "%s%i:%d sending work (%d:%02x)", klncgpu->drv->name, klncgpu->device_id, dev, dev, kline.wt.workid); KLIST *kitem = SendCmdGetReply(klncgpu, &kline, sizeof(kline.wt)); if (kitem != NULL) { wr_lock(&(klninfo->stat_lock)); memcpy((void *)&(klninfo->status[dev]), kitem, sizeof(klninfo->status[dev])); wr_unlock(&(klninfo->stat_lock)); kitem = release_kitem(klncgpu, kitem); // remove old work wque_size = 0; wque_cleared = 0; cgtime(&tv_old); wr_lock(&klncgpu->qlock); HASH_ITER(hh, klncgpu->queued_work, look, tmp) { if (ms_tdiff(&tv_old, &(look->tv_stamp)) > OLD_WORK_MS) { __work_completed(klncgpu, look); free_work(look); wque_cleared++; } else wque_size++; } wr_unlock(&klncgpu->qlock); wr_lock(&(klninfo->stat_lock)); klninfo->wque_size = wque_size; klninfo->wque_cleared = wque_cleared; wr_unlock(&(klninfo->stat_lock)); return true; } return false; } static bool klondike_queue_full(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); struct work *work = NULL; int dev, queued, slaves, seq, howlong; struct timeval now; bool nowork; if (klncgpu->shutdown == true) return true; cgtime(&now); rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; for (dev = 0; dev <= slaves; dev++) if (ms_tdiff(&now, &(klninfo->jobque[dev].last_update)) > LATE_UPDATE_MS) { klninfo->jobque[dev].late_update_count++; seq = ++klninfo->jobque[dev].late_update_sequential; rd_unlock(&(klninfo->stat_lock)); if (seq < LATE_UPDATE_LIMIT) { applog(LOG_DEBUG, "%s%i:%d late update", klncgpu->drv->name, klncgpu->device_id, dev); klondike_get_stats(klncgpu); goto que; } else 
{ applog(LOG_WARNING, "%s%i:%d late update (%d) reached - attempting reset", klncgpu->drv->name, klncgpu->device_id, dev, LATE_UPDATE_LIMIT); control_init(klncgpu); kln_enable(klncgpu); klondike_get_stats(klncgpu); rd_lock(&(klninfo->stat_lock)); howlong = ms_tdiff(&now, &(klninfo->jobque[dev].last_update)); if (howlong > LATE_UPDATE_MS) { rd_unlock(&(klninfo->stat_lock)); if (howlong > LATE_UPDATE_NODEV_MS) { applog(LOG_ERR, "%s%i:%d reset failed - dropping device", klncgpu->drv->name, klncgpu->device_id, dev); usb_nodev(klncgpu); } else cgsleep_ms(LATE_UPDATE_SLEEP_MS); return true; } break; } } rd_unlock(&(klninfo->stat_lock)); que: nowork = true; for (queued = 0; queued < MAX_WORK_COUNT-1; queued++) for (dev = 0; dev <= slaves; dev++) { tryagain: rd_lock(&(klninfo->stat_lock)); if (klninfo->jobque[dev].overheat) { double temp = cvtKlnToC(klninfo->status[0].kline.ws.temp); if ((queued == MAX_WORK_COUNT-2) && ms_tdiff(&now, &(klninfo->jobque[dev].last_update)) > (LATE_UPDATE_MS/2)) { rd_unlock(&(klninfo->stat_lock)); klondike_get_stats(klncgpu); goto tryagain; } if (temp <= KLN_COOLED_DOWN) { klninfo->jobque[dev].overheat = false; rd_unlock(&(klninfo->stat_lock)); applog(LOG_WARNING, "%s%i:%d Overheat recovered (%.0fC)", klncgpu->drv->name, klncgpu->device_id, dev, temp); kln_enable(klncgpu); goto tryagain; } else { rd_unlock(&(klninfo->stat_lock)); continue; } } if (klninfo->jobque[dev].workqc <= queued) { rd_unlock(&(klninfo->stat_lock)); if (!work) work = get_queued(klncgpu); if (unlikely(!work)) return false; nowork = false; if (klondike_send_work(klncgpu, dev, work)) return false; } else rd_unlock(&(klninfo->stat_lock)); } if (nowork) cgsleep_ms(10); // avoid a hard loop in case we have nothing to do return true; } static int64_t klondike_scanwork(struct thr_info *thr) { struct cgpu_info *klncgpu = thr->cgpu; struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); int64_t newhashcount = 0; int dev, slaves; if (klninfo->usbinfo_nodev) return -1; restart_wait(thr, 200); if (klninfo->status != NULL) { rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; for (dev = 0; dev <= slaves; dev++) { uint64_t newhashdev = 0, hashcount; int maxcount; hashcount = K_HASHCOUNT(klninfo->status[dev].kline.ws.hashcount); maxcount = K_MAXCOUNT(klninfo->status[dev].kline.ws.maxcount); // todo: chg this to check workid for wrapped instead if (klninfo->devinfo[dev].lasthashcount > hashcount) newhashdev += maxcount; // hash counter wrapped newhashdev += hashcount - klninfo->devinfo[dev].lasthashcount; klninfo->devinfo[dev].lasthashcount = hashcount; if (maxcount != 0) klninfo->hashcount += (newhashdev << 32) / maxcount; } newhashcount += 0xffffffffull * (uint64_t)klninfo->noncecount; klninfo->noncecount = 0; rd_unlock(&(klninfo->stat_lock)); } return newhashcount; } #ifdef HAVE_CURSES static void klondike_wlogprint_status(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); uint16_t fan = 0; uint16_t clock = 0; int dev, slaves; if (klninfo->status == NULL) { return; } rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; for (dev = 0; dev <= slaves; dev++) { fan += klninfo->cfg[dev].kline.cfg.fantarget; clock += (uint16_t)K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock); } rd_unlock(&(klninfo->stat_lock)); fan /= slaves + 1; fan = 100 * fan / 255; clock /= slaves + 1; wlogprint("Frequency: %d MHz\n", (int)clock); if (fan && fan <= 100) wlogprint("Fan speed: %d%%\n", fan); 
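	/* Both figures above are plain averages over the master and any
	 * slaves: fantarget is a 0-255 PWM value scaled to a percentage, and
	 * hashclock is already in MHz.  A worked example with hypothetical
	 * readings for one master plus one slave:
	 *
	 *   fantarget 128 and 192 -> (128 + 192) / 2 = 160 -> 100*160/255 = 62%
	 *   hashclock 256 and 282 -> (256 + 282) / 2 = 269 MHz
	 */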
} #endif static struct api_data *klondike_api_stats(struct cgpu_info *klncgpu) { struct klondike_info *klninfo = (struct klondike_info *)(klncgpu->device_data); struct api_data *root = NULL; char buf[32]; int dev, slaves; if (klninfo->status == NULL) return NULL; rd_lock(&(klninfo->stat_lock)); slaves = klninfo->status[0].kline.ws.slavecount; for (dev = 0; dev <= slaves; dev++) { float fTemp = cvtKlnToC(klninfo->status[dev].kline.ws.temp); sprintf(buf, "Temp %d", dev); root = api_add_temp(root, buf, &fTemp, true); double dClk = (double)K_HASHCLOCK(klninfo->cfg[dev].kline.cfg.hashclock); sprintf(buf, "Clock %d", dev); root = api_add_freq(root, buf, &dClk, true); unsigned int iFan = (unsigned int)100 * klninfo->cfg[dev].kline.cfg.fantarget / 255; sprintf(buf, "Fan Percent %d", dev); root = api_add_int(root, buf, (int *)(&iFan), true); iFan = 0; if (klninfo->status[dev].kline.ws.fanspeed > 0) iFan = (unsigned int)TACH_FACTOR / klninfo->status[dev].kline.ws.fanspeed; sprintf(buf, "Fan RPM %d", dev); root = api_add_int(root, buf, (int *)(&iFan), true); if (klninfo->devinfo[dev].chipstats != NULL) { char data[2048]; char one[32]; int n; sprintf(buf, "Nonces / Chip %d", dev); data[0] = '\0'; for (n = 0; n < klninfo->status[dev].kline.ws.chipcount; n++) { snprintf(one, sizeof(one), "%07d ", klninfo->devinfo[dev].chipstats[n]); strcat(data, one); } root = api_add_string(root, buf, data, true); sprintf(buf, "Errors / Chip %d", dev); data[0] = '\0'; for (n = 0; n < klninfo->status[dev].kline.ws.chipcount; n++) { snprintf(one, sizeof(one), "%07d ", klninfo->devinfo[dev].chipstats[n + klninfo->status[dev].kline.ws.chipcount]); strcat(data, one); } root = api_add_string(root, buf, data, true); } } root = api_add_uint64(root, "Hash Count", &(klninfo->hashcount), true); root = api_add_uint64(root, "Error Count", &(klninfo->errorcount), true); root = api_add_uint64(root, "Noise Count", &(klninfo->noisecount), true); root = api_add_int(root, "KLine Limit", &(klninfo->kline_count), true); root = api_add_int(root, "KLine Used", &(klninfo->used_count), true); root = api_add_elapsed(root, "KQue Delay Count", &(klninfo->delay_count), true); root = api_add_elapsed(root, "KQue Delay Total", &(klninfo->delay_total), true); root = api_add_elapsed(root, "KQue Delay Min", &(klninfo->delay_min), true); root = api_add_elapsed(root, "KQue Delay Max", &(klninfo->delay_max), true); double avg; if (klninfo->delay_count == 0) avg = 0; else avg = klninfo->delay_total / klninfo->delay_count; root = api_add_diff(root, "KQue Delay Avg", &avg, true); root = api_add_elapsed(root, "KQue Nonce Count", &(klninfo->nonce_count), true); root = api_add_elapsed(root, "KQue Nonce Total", &(klninfo->nonce_total), true); root = api_add_elapsed(root, "KQue Nonce Min", &(klninfo->nonce_min), true); root = api_add_elapsed(root, "KQue Nonce Max", &(klninfo->nonce_max), true); if (klninfo->nonce_count == 0) avg = 0; else avg = klninfo->nonce_total / klninfo->nonce_count; root = api_add_diff(root, "KQue Nonce Avg", &avg, true); root = api_add_int(root, "WQue Size", &(klninfo->wque_size), true); root = api_add_int(root, "WQue Cleared", &(klninfo->wque_cleared), true); rd_unlock(&(klninfo->stat_lock)); return root; } struct device_drv klondike_drv = { .dname = "Klondike", .name = "KLN", .lowl_match = klondike_lowl_match, .lowl_probe = klondike_lowl_probe, .get_api_stats = klondike_api_stats, .get_stats = klondike_get_stats, .thread_prepare = klondike_thread_prepare, .thread_init = klondike_thread_init, .minerloop = hash_queued_work, .scanwork = 
klondike_scanwork, .queue_full = klondike_queue_full, .flush_work = klondike_flush_work, .thread_shutdown = klondike_shutdown, .thread_enable = klondike_thread_enable, #ifdef HAVE_CURSES .proc_wlogprint_status = klondike_wlogprint_status, #endif }; bfgminer-bfgminer-3.10.0/driver-knc.c000066400000000000000000000457221226556647300174150ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include #include #ifdef HAVE_LINUX_I2C_DEV_USER_H #include #else #include #endif #include #include #include "deviceapi.h" #include "logging.h" #include "miner.h" #include "spidevc.h" #define KNC_POLL_INTERVAL_US 10000 #define KNC_SPI_SPEED 3000000 #define KNC_SPI_DELAY 0 #define KNC_SPI_MODE (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH) #define KNC_SPI_BITS 8 /* The core disable/enable strategy is as follows: If a core gets 10 HW errors in a row without doing any proper work it is disabled for 10 seconds. When a core gets 10 HW errors the next time it checks when it was enabled the last time and compare that to when it started to get errors. If those times are close (50%) the disabled time is doubled, if not it is just disabled for 10s again. */ #define KNC_MAX_HWERR_IN_ROW 10 #define KNC_HWERR_DISABLE_SECS (10) #define KNC_MAX_DISABLE_SECS (15 * 60) static const char * const i2cpath = "/dev/i2c-2"; #define KNC_I2C_TEMPLATE "/dev/i2c-%d" enum knc_request_cmd { KNC_REQ_SUBMIT_WORK = 2, KNC_REQ_FLUSH_QUEUE = 3, }; enum knc_reply_type { KNC_REPLY_NONCE_FOUND = 1, KNC_REPLY_WORK_DONE = 2, }; enum knc_i2c_core_status { KNC_I2CSTATUS_DISABLED = 2, KNC_I2CSTATUS_ENABLED = 3, }; BFG_REGISTER_DRIVER(knc_drv) struct knc_device { int i2c; struct spi_port *spi; struct cgpu_info *cgpu; bool need_flush; struct work *workqueue; int workqueue_size; int workqueue_max; int next_id; struct work *devicework; }; struct knc_core { int asicno; int coreno; float volt; float current; int hwerr_in_row; int hwerr_disable_time; struct timeval enable_at; struct timeval first_hwerr; }; static bool knc_detect_one(const char *devpath) { static struct cgpu_info *prev_cgpu = NULL; struct cgpu_info *cgpu; int i; const int fd = open(i2cpath, O_RDWR); char *leftover = NULL; const int i2cslave = strtol(devpath, &leftover, 0); uint8_t buf[0x20]; if (leftover && leftover[0]) return false; if (unlikely(fd == -1)) { applog(LOG_DEBUG, "%s: Failed to open %s", __func__, i2cpath); return false; } if (ioctl(fd, I2C_SLAVE, i2cslave)) { close(fd); applog(LOG_DEBUG, "%s: Failed to select i2c slave 0x%x", __func__, i2cslave); return false; } i = i2c_smbus_read_i2c_block_data(fd, 0, 0x20, buf); close(fd); if (-1 == i) { applog(LOG_DEBUG, "%s: 0x%x: Failed to read i2c block data", __func__, i2cslave); return false; } for (i = 0; ; ++i) { if (buf[i] == 3) break; if (i == 0x1f) return false; } cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &knc_drv, .device_path = strdup(devpath), .deven = DEV_ENABLED, .procs = 192, .threads = prev_cgpu ? 
0 : 1, }; const bool rv = add_cgpu_slave(cgpu, prev_cgpu); prev_cgpu = cgpu; return rv; } static int knc_detect_auto(void) { const int first = 0x20, last = 0x26; char devpath[4]; int found = 0, i; for (i = first; i <= last; ++i) { sprintf(devpath, "%d", i); if (knc_detect_one(devpath)) ++found; } return found; } static void knc_detect(void) { generic_detect(&knc_drv, knc_detect_one, knc_detect_auto, GDF_REQUIRE_DNAME | GDF_DEFAULT_NOAUTO); } static bool knc_spi_open(const char *repr, struct spi_port * const spi) { const char * const spipath = "/dev/spidev1.0"; const int fd = open(spipath, O_RDWR); const uint8_t lsbfirst = 0; if (fd == -1) return false; if (ioctl(fd, SPI_IOC_WR_MODE , &spi->mode )) goto fail; if (ioctl(fd, SPI_IOC_WR_LSB_FIRST , &lsbfirst )) goto fail; if (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &spi->bits )) goto fail; if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ , &spi->speed)) goto fail; spi->fd = fd; return true; fail: close(fd); spi->fd = -1; applog(LOG_WARNING, "%s: Failed to open %s", repr, spipath); return false; } static bool knc_spi_txrx(struct spi_port * const spi) { const void * const wrbuf = spi_gettxbuf(spi); void * const rdbuf = spi_getrxbuf(spi); const size_t bufsz = spi_getbufsz(spi); const int fd = spi->fd; struct spi_ioc_transfer xf = { .tx_buf = (uintptr_t) wrbuf, .rx_buf = (uintptr_t) rdbuf, .len = bufsz, .delay_usecs = spi->delay, .speed_hz = spi->speed, .bits_per_word = spi->bits, }; return (ioctl(fd, SPI_IOC_MESSAGE(1), &xf) > 0); } static void knc_clean_flush(struct spi_port * const spi) { const uint8_t flushcmd = KNC_REQ_FLUSH_QUEUE << 4; const size_t spi_req_sz = 0x1000; spi_clear_buf(spi); spi_emit_buf(spi, &flushcmd, 1); spi_emit_nop(spi, spi_req_sz - spi_getbufsz(spi)); applog(LOG_DEBUG, "%s: Issuing flush command to clear out device queues", knc_drv.dname); spi_txrx(spi); } static bool knc_init(struct thr_info * const thr) { const int max_cores = 192; struct thr_info *mythr; struct cgpu_info * const cgpu = thr->cgpu, *proc; struct knc_device *knc; struct knc_core *knccore; struct spi_port *spi; const int i2c = open(i2cpath, O_RDWR); int i2cslave, i, j; uint8_t buf[0x20]; if (unlikely(i2c == -1)) { applog(LOG_DEBUG, "%s: Failed to open %s", __func__, i2cpath); return false; } knc = malloc(sizeof(*knc)); for (proc = cgpu; proc; ) { if (proc->device != proc) { applog(LOG_WARNING, "%"PRIpreprv": Extra processor?", proc->proc_repr); continue; } i2cslave = atoi(proc->device_path); if (ioctl(i2c, I2C_SLAVE, i2cslave)) { applog(LOG_DEBUG, "%s: Failed to select i2c slave 0x%x", __func__, i2cslave); return false; } for (i = 0; i < max_cores; i += 0x20) { i2c_smbus_read_i2c_block_data(i2c, i, 0x20, buf); for (j = 0; j < 0x20; ++j) { mythr = proc->thr[0]; mythr->cgpu_data = knccore = malloc(sizeof(*knccore)); *knccore = (struct knc_core){ .asicno = i2cslave - 0x20, .coreno = i + j, .hwerr_in_row = 0, .hwerr_disable_time = KNC_HWERR_DISABLE_SECS, }; timer_set_now(&knccore->enable_at); proc->device_data = knc; switch (buf[j]) { case KNC_I2CSTATUS_ENABLED: break; default: // permanently disabled proc->deven = DEV_DISABLED; break; case KNC_I2CSTATUS_DISABLED: proc->deven = DEV_RECOVER_DRV; break; } proc = proc->next_proc; if ((!proc) || proc->device == proc) goto nomorecores; } } nomorecores: ; } spi = malloc(sizeof(*spi)); *knc = (struct knc_device){ .i2c = i2c, .spi = spi, .cgpu = cgpu, .workqueue_max = 1, }; /* Be careful, read spidevc.h comments for warnings */ memset(spi, 0, sizeof(*spi)); spi->txrx = knc_spi_txrx; spi->cgpu = cgpu; spi->repr = knc_drv.dname; 
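	/* The speed, delay, mode and bits fields assigned just below mirror
	 * the KNC_SPI_* constants near the top of this file: a 3 MHz clock,
	 * SPI mode 3 (CPOL|CPHA) with an active-high chip select, 8 bits per
	 * word and no inter-transfer delay.  knc_spi_open() pushes the mode,
	 * bit order, word size and speed to the kernel via the SPI_IOC_WR_*
	 * ioctls, and knc_spi_txrx() repeats speed/bits/delay in every
	 * spi_ioc_transfer it submits. */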
spi->logprio = LOG_ERR; spi->speed = KNC_SPI_SPEED; spi->delay = KNC_SPI_DELAY; spi->mode = KNC_SPI_MODE; spi->bits = KNC_SPI_BITS; if (!knc_spi_open(cgpu->dev_repr, spi)) return false; knc_clean_flush(spi); timer_set_now(&thr->tv_poll); return true; } static void knc_set_queue_full(struct knc_device * const knc) { const bool full = (knc->workqueue_size >= knc->workqueue_max); struct cgpu_info *proc; for (proc = knc->cgpu; proc; proc = proc->next_proc) { struct thr_info * const thr = proc->thr[0]; thr->queue_full = full; } } static void knc_remove_local_queue(struct knc_device * const knc, struct work * const work) { DL_DELETE(knc->workqueue, work); free_work(work); --knc->workqueue_size; } static void knc_prune_local_queue(struct thr_info *thr) { struct cgpu_info * const cgpu = thr->cgpu; struct knc_device * const knc = cgpu->device_data; struct work *work, *tmp; DL_FOREACH_SAFE(knc->workqueue, work, tmp) { if (stale_work(work, false)) knc_remove_local_queue(knc, work); } knc_set_queue_full(knc); } static bool knc_queue_append(struct thr_info * const thr, struct work * const work) { struct cgpu_info * const cgpu = thr->cgpu; struct knc_device * const knc = cgpu->device_data; if (knc->workqueue_size >= knc->workqueue_max) { knc_prune_local_queue(thr); if (thr->queue_full) return false; } DL_APPEND(knc->workqueue, work); ++knc->workqueue_size; knc_set_queue_full(knc); if (thr->queue_full) knc_prune_local_queue(thr); return true; } #define HASH_LAST_ADDED(head, out) \ (out = (head) ? (ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail)) : NULL) static void knc_queue_flush(struct thr_info * const thr) { struct cgpu_info * const cgpu = thr->cgpu; struct knc_device * const knc = cgpu->device_data; struct work *work, *tmp; if (knc->cgpu != cgpu) return; DL_FOREACH_SAFE(knc->workqueue, work, tmp) { knc_remove_local_queue(knc, work); } knc_set_queue_full(knc); HASH_LAST_ADDED(knc->devicework, work); if (work && stale_work(work, true)) { knc->need_flush = true; timer_set_now(&thr->tv_poll); } } static inline uint16_t get_u16be(const void * const p) { const uint8_t * const b = p; return (((uint16_t)b[0]) << 8) | b[1]; } static inline uint32_t get_u32be(const void * const p) { const uint8_t * const b = p; return (((uint32_t)b[0]) << 0x18) | (((uint32_t)b[1]) << 0x10) | (((uint32_t)b[2]) << 8) | b[3]; } static void knc_poll(struct thr_info * const thr) { struct thr_info *mythr; struct cgpu_info * const cgpu = thr->cgpu, *proc; struct knc_device * const knc = cgpu->device_data; struct spi_port * const spi = knc->spi; struct knc_core *knccore; struct work *work, *tmp; uint8_t buf[0x30], *rxbuf; int works_sent = 0, asicno, i; uint16_t workaccept; int workid = knc->next_id; uint32_t nonce, coreno; size_t spi_req_sz = 0x1000; unsigned long delay_usecs = KNC_POLL_INTERVAL_US; knc_prune_local_queue(thr); spi_clear_buf(spi); if (knc->need_flush) { applog(LOG_NOTICE, "%s: Abandoning stale searches to restart", knc_drv.dname); buf[0] = KNC_REQ_FLUSH_QUEUE << 4; spi_emit_buf(spi, buf, sizeof(buf)); } DL_FOREACH(knc->workqueue, work) { buf[0] = KNC_REQ_SUBMIT_WORK << 4; buf[1] = 0; buf[2] = (workid >> 8) & 0x7f; buf[3] = workid & 0xff; for (i = 0; i < 0x20; ++i) buf[4 + i] = work->midstate[0x1f - i]; for (i = 0; i < 0xc; ++i) buf[0x24 + i] = work->data[0x4b - i]; spi_emit_buf(spi, buf, sizeof(buf)); ++works_sent; ++workid; } spi_emit_nop(spi, spi_req_sz - spi_getbufsz(spi)); spi_txrx(spi); rxbuf = spi_getrxbuf(spi); if (rxbuf[3] & 1) applog(LOG_DEBUG, "%s: Receive buffer overflow reported", knc_drv.dname); 
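	/* The loop below skips the 12-byte status header (overflow flag and
	 * the workaccept count parsed next) and then walks the rest of the
	 * buffer in 12-byte reply records.  Record fields are big-endian and
	 * are extracted with get_u16be()/get_u32be().  For a
	 * KNC_REPLY_NONCE_FOUND record the layout, as inferred from the
	 * parsing code below rather than from a datasheet, is roughly:
	 *
	 *   [0]     reply type (bits 7:6) and ASIC number (bits 5:3)
	 *   [2:3]   work id, matched against knc->devicework
	 *   [4:7]   nonce (run through le32toh() before submission)
	 *   [8:11]  core number within the ASIC
	 */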
workaccept = get_u16be(&rxbuf[6]); applog(LOG_DEBUG, "%s: %lu/%d jobs accepted to queue (max=%d)", knc_drv.dname, (unsigned long)workaccept, works_sent, knc->workqueue_max); while (true) { rxbuf += 0xc; spi_req_sz -= 0xc; if (spi_req_sz < 0xc) break; const int rtype = rxbuf[0] >> 6; if (rtype && opt_debug) { char x[(0xc * 2) + 1]; bin2hex(x, rxbuf, 0xc); applog(LOG_DEBUG, "%s: RECV: %s", knc_drv.dname, x); } if (rtype != KNC_REPLY_NONCE_FOUND && rtype != KNC_REPLY_WORK_DONE) continue; asicno = (rxbuf[0] & 0x38) >> 3; coreno = get_u32be(&rxbuf[8]); proc = cgpu; while (true) { knccore = proc->thr[0]->cgpu_data; if (knccore->asicno == asicno) break; do { proc = proc->next_proc; } while(proc != proc->device); } for (i = 0; i < coreno; ++i) proc = proc->next_proc; mythr = proc->thr[0]; knccore = mythr->cgpu_data; i = get_u16be(&rxbuf[2]); HASH_FIND_INT(knc->devicework, &i, work); if (!work) { const char * const msgtype = (rtype == KNC_REPLY_NONCE_FOUND) ? "nonce found" : "work done"; applog(LOG_WARNING, "%"PRIpreprv": Got %s message about unknown work 0x%04x", proc->proc_repr, msgtype, i); if (KNC_REPLY_NONCE_FOUND == rtype) { nonce = get_u32be(&rxbuf[4]); nonce = le32toh(nonce); inc_hw_errors2(mythr, NULL, &nonce); } else inc_hw_errors2(mythr, NULL, NULL); continue; } switch (rtype) { case KNC_REPLY_NONCE_FOUND: nonce = get_u32be(&rxbuf[4]); nonce = le32toh(nonce); if (submit_nonce(mythr, work, nonce)) knccore->hwerr_in_row = 0; break; case KNC_REPLY_WORK_DONE: HASH_DEL(knc->devicework, work); free_work(work); hashes_done2(mythr, 0x100000000, NULL); break; } } if (knc->need_flush) { knc->need_flush = false; HASH_ITER(hh, knc->devicework, work, tmp) { HASH_DEL(knc->devicework, work); free_work(work); } delay_usecs = 0; } if (workaccept) { if (workaccept >= knc->workqueue_max) { knc->workqueue_max = workaccept; delay_usecs = 0; } DL_FOREACH_SAFE(knc->workqueue, work, tmp) { --knc->workqueue_size; DL_DELETE(knc->workqueue, work); work->device_id = knc->next_id++ & 0x7fff; HASH_ADD_INT(knc->devicework, device_id, work); if (!--workaccept) break; } knc_set_queue_full(knc); } timer_set_delay_from_now(&thr->tv_poll, delay_usecs); } static bool _knc_core_setstatus(struct thr_info * const thr, uint8_t val) { struct cgpu_info * const proc = thr->cgpu; struct knc_device * const knc = proc->device_data; struct knc_core * const knccore = thr->cgpu_data; const int i2c = knc->i2c; const int i2cslave = 0x20 + knccore->asicno; if (ioctl(i2c, I2C_SLAVE, i2cslave)) { applog(LOG_DEBUG, "%"PRIpreprv": %s: Failed to select i2c slave 0x%x", proc->proc_repr, __func__, i2cslave); return false; } return (-1 != i2c_smbus_write_byte_data(i2c, knccore->coreno, val)); } static void knc_core_disable(struct thr_info * const thr) { _knc_core_setstatus(thr, 0); } static void knc_core_enable(struct thr_info * const thr) { struct knc_core * const knccore = thr->cgpu_data; timer_set_now(&knccore->enable_at); _knc_core_setstatus(thr, 1); } static float knc_dcdc_decode_5_11(uint16_t raw) { if (raw == 0) return 0.0; int dcdc_vin_exp = (raw & 0xf800) >> 11; float dcdc_vin_man = raw & 0x07ff; if (dcdc_vin_exp >= 16) dcdc_vin_exp = -32 + dcdc_vin_exp; float dcdc_vin = dcdc_vin_man * exp2(dcdc_vin_exp); return dcdc_vin; } static void knc_hw_error(struct thr_info * const thr) { struct cgpu_info * const proc = thr->cgpu; struct knc_core * const knccore = thr->cgpu_data; if(knccore->hwerr_in_row == 0) timer_set_now(&knccore->first_hwerr); ++knccore->hwerr_in_row; if (knccore->hwerr_in_row >= KNC_MAX_HWERR_IN_ROW && proc->deven == 
DEV_ENABLED) { struct timeval now; timer_set_now(&now); float first_err_dt = tdiff(&now, &knccore->first_hwerr); float enable_dt = tdiff(&now, &knccore->enable_at); if(first_err_dt * 1.5 > enable_dt) { // didn't really do much good knccore->hwerr_disable_time *= 2; if (knccore->hwerr_disable_time > KNC_MAX_DISABLE_SECS) knccore->hwerr_disable_time = KNC_MAX_DISABLE_SECS; } else knccore->hwerr_disable_time = KNC_HWERR_DISABLE_SECS; proc->deven = DEV_RECOVER_DRV; applog(LOG_WARNING, "%"PRIpreprv": Disabled. %d hwerr in %.3f / %.3f . disabled %d s", proc->proc_repr, knccore->hwerr_in_row, enable_dt, first_err_dt, knccore->hwerr_disable_time); timer_set_delay_from_now(&knccore->enable_at, knccore->hwerr_disable_time * 1000000); } } static bool knc_get_stats(struct cgpu_info * const cgpu) { if (cgpu->device != cgpu) return true; struct thr_info *thr = cgpu->thr[0]; struct knc_core *knccore = thr->cgpu_data; struct cgpu_info *proc; const int i2cdev = knccore->asicno + 3; const int i2cslave_temp = 0x48; const int i2cslave_dcdc[] = {0x10, 0x12, 0x14, 0x17}; int die, i; int i2c; int32_t rawtemp, rawvolt, rawcurrent; float temp, volt, current; struct timeval tv_now; bool rv = false; char i2cpath[sizeof(KNC_I2C_TEMPLATE)]; sprintf(i2cpath, KNC_I2C_TEMPLATE, i2cdev); i2c = open(i2cpath, O_RDWR); if (i2c == -1) { applog(LOG_DEBUG, "%s: %s: Failed to open %s", cgpu->dev_repr, __func__, i2cpath); return false; } if (ioctl(i2c, I2C_SLAVE, i2cslave_temp)) { applog(LOG_DEBUG, "%s: %s: Failed to select i2c slave 0x%x", cgpu->dev_repr, __func__, i2cslave_temp); goto out; } rawtemp = i2c_smbus_read_word_data(i2c, 0); if (rawtemp == -1) goto out; temp = ((float)(rawtemp & 0xff)); if (rawtemp & 0x8000) temp += 0.5; /* DCDC i2c slaves are on 0x10 + [0-7] 8 DCDC boards have all populated 4 DCDC boards only have 0,2,4,7 populated Only 0,2,4,7 are used Each DCDC powers one die in the chip, each die has 48 cores Datasheet at http://www.lineagepower.com/oem/pdf/MDT040A0X.pdf */ timer_set_now(&tv_now); volt = current = 0; for (proc = cgpu, i = 0; proc && proc->device == cgpu; proc = proc->next_proc, ++i) { thr = proc->thr[0]; knccore = thr->cgpu_data; die = i / 0x30; if (0 == i % 0x30) { if (ioctl(i2c, I2C_SLAVE, i2cslave_dcdc[die])) { applog(LOG_DEBUG, "%s: %s: Failed to select i2c slave 0x%x", cgpu->dev_repr, __func__, i2cslave_dcdc[die]); goto out; } rawvolt = i2c_smbus_read_word_data(i2c, 0x8b); // VOUT if (rawvolt == -1) goto out; rawcurrent = i2c_smbus_read_word_data(i2c, 0x8c); // IOUT if (rawcurrent == -1) goto out; volt = (float)rawvolt * exp2(-10); current = (float)knc_dcdc_decode_5_11(rawcurrent); applog(LOG_DEBUG, "%s: die %d %6.3fV %5.2fA", cgpu->dev_repr, die, volt, current); } proc->temp = temp; knccore->volt = volt; knccore->current = current; // NOTE: We need to check _mt_disable_called because otherwise enabling won't assert it to i2c (it's false when getting stats for eg proc 0 before proc 1+ haven't initialised completely yet) if (proc->deven == DEV_RECOVER_DRV && timer_passed(&knccore->enable_at, &tv_now) && thr->_mt_disable_called) { knccore->hwerr_in_row = 0; proc_enable(proc); } } rv = true; out: close(i2c); return rv; } static struct api_data *knc_api_extra_device_status(struct cgpu_info * const cgpu) { struct api_data *root = NULL; struct thr_info * const thr = cgpu->thr[0]; struct knc_core * const knccore = thr->cgpu_data; root = api_add_volts(root, "Voltage", &knccore->volt, false); root = api_add_volts(root, "DCDC Current", &knccore->current, false); return root; } #ifdef HAVE_CURSES static 
void knc_wlogprint_status(struct cgpu_info * const cgpu) { struct thr_info * const thr = cgpu->thr[0]; struct knc_core * const knccore = thr->cgpu_data; wlogprint("Voltage: %.3f DCDC Current: %.3f\n", knccore->volt, knccore->current); } #endif struct device_drv knc_drv = { .dname = "knc", .name = "KNC", .drv_detect = knc_detect, .thread_init = knc_init, .thread_disable = knc_core_disable, .thread_enable = knc_core_enable, .minerloop = minerloop_queue, .queue_append = knc_queue_append, .queue_flush = knc_queue_flush, .poll = knc_poll, .hw_error = knc_hw_error, .get_stats = knc_get_stats, .get_api_extra_device_status = knc_api_extra_device_status, #ifdef HAVE_CURSES .proc_wlogprint_status = knc_wlogprint_status, #endif }; bfgminer-bfgminer-3.10.0/driver-littlefury.c000066400000000000000000000323421226556647300210370ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "logging.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "miner.h" #include "spidevc.h" #include "util.h" enum littlefury_opcode { LFOP_VERSION = 0, LFOP_SPI = 1, LFOP_REGVOLT = 2, LFOP_REGINFO = 3, LFOP_REGPWR = 4, LFOP_TEMP = 5, LFOP_LED = 6, LFOP_ADC = 7, }; BFG_REGISTER_DRIVER(littlefury_drv) static uint16_t crc16tab[] = { 0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7, 0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef, 0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6, 0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de, 0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485, 0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d, 0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4, 0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc, 0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823, 0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b, 0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12, 0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a, 0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41, 0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49, 0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70, 0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78, 0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f, 0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067, 0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e, 0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256, 0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d, 0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405, 0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c, 0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634, 0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab, 0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3, 0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a, 0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92, 0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9, 0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1, 0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8, 0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0, }; static uint16_t crc16_floating(uint16_t next_byte, uint16_t seed) { return 
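/* one table-driven CRC-16 step (CCITT polynomial 0x1021, per the table above):
   shift the running CRC up a byte and XOR in the table entry selected by the
   old high byte XORed with the next input byte */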
((seed << 8) ^ crc16tab[(seed >> 8) ^ next_byte]) & 0xFFFF; } static uint16_t crc16(void *p, size_t sz) { const uint8_t * const s = p; uint16_t crc = 0xFFFF; for (size_t i = 0; i < sz; ++i) crc = crc16_floating(s[i], crc); return crc; } static ssize_t keep_reading(int prio, int fd, void *buf, size_t count) { ssize_t r, rv = 0; while (count) { r = read(fd, buf, count); if (unlikely(r <= 0)) { applog(prio, "Read of fd %d returned %d", fd, (int)r); return rv ?: r; } rv += r; count -= r; buf += r; } return rv; } static bool bitfury_do_packet(int prio, const char *repr, const int fd, void * const buf, uint16_t * const bufsz, const uint8_t op, const void * const payload, const uint16_t payloadsz) { uint16_t crc; size_t sz; ssize_t r; uint8_t pkt[0x407]; bool b; { sz = 2 + 1 + 2 + payloadsz + 2; pkt[0] = 0xab; pkt[1] = 0xcd; pkt[2] = op; pkt[3] = payloadsz >> 8; pkt[4] = payloadsz & 0xff; if (payloadsz) memcpy(&pkt[5], payload, payloadsz); crc = crc16(&pkt[2], 3 + (size_t)payloadsz); pkt[sz - 2] = crc >> 8; pkt[sz - 1] = crc & 0xff; if (unlikely(opt_dev_protocol)) { char hex[(sz * 2) + 1]; bin2hex(hex, pkt, sz); applog(LOG_DEBUG, "%s: DEVPROTO: SEND %s", repr, hex); } r = write(fd, pkt, sz); if (sz != r) { applog(prio, "%s: Failed to write packet (%d bytes succeeded)", repr, (int)r); return false; } } { r = keep_reading(prio, fd, pkt, 5); if (5 != r || pkt[0] != 0xab || pkt[1] != 0xcd || pkt[2] != op) { char hex[(r * 2) + 1]; bin2hex(hex, pkt, r); applog(prio, "%s: DEVPROTO: RECV %s", repr, hex); applog(prio, "%s: Failed to read correct packet header", repr); return false; } sz = (((unsigned)pkt[3] << 8) | pkt[4]) + 2; r = keep_reading(prio, fd, &pkt[5], sz); if (sz != r) { r += 5; char hex[(r * 2) + 1]; bin2hex(hex, pkt, r); applog(prio, "%s: DEVPROTO: RECV %s", repr, hex); applog(prio, "%s: Failed to read packet payload (len=%d)", repr, (int)sz); return false; } crc = (pkt[sz + 3] << 8) | pkt[sz + 4]; b = (crc != crc16(&pkt[2], sz + 1)); if (unlikely(opt_dev_protocol || b)) { char hex[((sz + 5) * 2) + 1]; bin2hex(hex, pkt, sz + 5); applog(b ? prio : LOG_DEBUG, "%s: DEVPROTO: RECV %s", repr, hex); if (b) { applog(prio, "%s: Packet checksum mismatch", repr); return false; } } sz -= 2; memcpy(buf, &pkt[5], (*bufsz < sz ? *bufsz : sz)); *bufsz = sz; } return true; } static bool littlefury_txrx(struct spi_port *port) { const struct cgpu_info * const cgpu = port->cgpu; const void *wrbuf = spi_gettxbuf(port); void *rdbuf = spi_getrxbuf(port); size_t bufsz = spi_getbufsz(port); uint16_t rbufsz, xfer; const int logprio = port->logprio; const char * const repr = port->repr; const int fd = cgpu->device->device_fd; rbufsz = 1; if (!bitfury_do_packet(logprio, repr, fd, rdbuf, &rbufsz, LFOP_SPI, NULL, 0)) return false; while (bufsz) { xfer = (bufsz > 1024) ? 1024 : bufsz; rbufsz = xfer; if (!bitfury_do_packet(logprio, repr, fd, rdbuf, &rbufsz, LFOP_SPI, wrbuf, xfer)) return false; if (rbufsz < xfer) { applog(port->logprio, "%s: SPI: Got fewer bytes back than sent (%d < %d)", repr, rbufsz, xfer); return false; } bufsz -= xfer; rdbuf += xfer; wrbuf += xfer; } return true; } static bool littlefury_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "LittleFury"); } static int littlefury_chip_count(struct cgpu_info * const info) { /* Do not allocate spi_port on the stack! 
OS X, at least, has a 512 KB default stack size for secondary threads */ struct spi_port *spi = malloc(sizeof(*spi)); spi->txrx = littlefury_txrx; spi->cgpu = info; spi->repr = littlefury_drv.dname; spi->logprio = LOG_DEBUG; const int chip_count = libbitfury_detectChips1(spi); free(spi); return chip_count; } static bool littlefury_detect_one(const char *devpath) { int fd, chips; uint8_t buf[255]; uint16_t bufsz; struct cgpu_info dummy; char *devname; fd = serial_open(devpath, 0, 10, true); applog(LOG_DEBUG, "%s: %s %s", littlefury_drv.dname, ((fd == -1) ? "Failed to open" : "Successfully opened"), devpath); if (unlikely(fd == -1)) goto err; bufsz = sizeof(buf); if (!bitfury_do_packet(LOG_DEBUG, littlefury_drv.dname, fd, buf, &bufsz, LFOP_VERSION, NULL, 0)) goto err; if (bufsz < 4) { applog(LOG_DEBUG, "%s: Incomplete version response", littlefury_drv.dname); goto err; } devname = malloc(bufsz - 3); memcpy(devname, (char*)&buf[4], bufsz - 4); devname[bufsz - 4] = '\0'; applog(LOG_DEBUG, "%s: Identified %s %d.%d.%d (features %02x)", littlefury_drv.dname, devname, buf[0], buf[1], buf[2], buf[3]); bufsz = sizeof(buf); if (!(bitfury_do_packet(LOG_DEBUG, littlefury_drv.dname, fd, buf, &bufsz, LFOP_REGPWR, "\1", 1) && bufsz && buf[0])) applog(LOG_WARNING, "%s: Unable to power on chip(s) for %s", littlefury_drv.dname, devpath); dummy.device = &dummy; dummy.device_fd = fd; chips = littlefury_chip_count(&dummy); if (!chips) { applog(LOG_WARNING, "%s: No Bitfury chips detected on %s", littlefury_drv.dname, devpath); free(devname); goto err; } else { applog(LOG_DEBUG, "%s: %d chips detected", littlefury_drv.dname, chips); } bufsz = sizeof(buf); bitfury_do_packet(LOG_DEBUG, littlefury_drv.dname, fd, buf, &bufsz, LFOP_REGPWR, "\0", 1); serial_close(fd); struct cgpu_info *cgpu; cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &littlefury_drv, .device_path = strdup(devpath), .deven = DEV_ENABLED, .procs = chips, .threads = 1, .name = devname, .cutofftemp = 85, }; // NOTE: Xcode's clang has a bug where it cannot find fields inside anonymous unions (more details in fpgautils) cgpu->device_fd = -1; return add_cgpu(cgpu); err: if (fd != -1) serial_close(fd); return false; } static bool littlefury_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, littlefury_detect_one); } static bool littlefury_thread_init(struct thr_info *thr) { struct cgpu_info * const cgpu = thr->cgpu; struct cgpu_info *proc; struct spi_port *spi; struct bitfury_device *bitfury; int i = 0; for (proc = cgpu; proc; proc = proc->next_proc) { spi = malloc(sizeof(*spi)); /* Be careful, read spidevc.h comments for warnings */ memset(spi, 0, sizeof(*spi)); spi->txrx = littlefury_txrx; spi->cgpu = proc; spi->repr = proc->proc_repr; spi->logprio = LOG_ERR; bitfury = malloc(sizeof(*bitfury)); *bitfury = (struct bitfury_device){ .spi = spi, .fasync = i++, }; proc->device_data = bitfury; bitfury->osc6_bits = 50; } timer_set_now(&thr->tv_poll); cgpu->status = LIFE_INIT2; return true; } static void littlefury_disable(struct thr_info * const thr) { struct cgpu_info *proc = thr->cgpu; struct cgpu_info * const dev = proc->device; bitfury_disable(thr); // If all chips disabled, kill power and close device bool any_running = false; for (proc = dev; proc; proc = proc->next_proc) if (proc->deven == DEV_ENABLED && !proc->thr[0]->pause) { any_running = true; break; } if (!any_running) { uint8_t buf[1]; uint16_t bufsz = 1; if (!(bitfury_do_packet(LOG_ERR, dev->dev_repr, dev->device_fd, buf, &bufsz, 
LFOP_REGPWR, "\0", 1) && bufsz && !buf[0])) applog(LOG_WARNING, "%s: Unable to power off chip(s)", dev->dev_repr); serial_close(dev->device_fd); dev->device_fd = -1; timer_unset(&dev->thr[0]->tv_poll); } } static void littlefury_enable(struct thr_info * const thr) { struct cgpu_info *proc = thr->cgpu; struct cgpu_info * const dev = proc->device; struct thr_info * const master_thr = dev->thr[0]; if (!timer_isset(&master_thr->tv_poll)) timer_set_now(&master_thr->tv_poll); } static void littlefury_shutdown(struct thr_info *thr) { struct cgpu_info * const cgpu = thr->cgpu; const int fd = cgpu->device->device_fd; uint8_t buf[1]; uint16_t bufsz = 1; bitfury_shutdown(thr); if (!(bitfury_do_packet(LOG_ERR, cgpu->dev_repr, fd, buf, &bufsz, LFOP_REGPWR, "\0", 1) && bufsz && !buf[0])) applog(LOG_WARNING, "%s: Unable to power off chip(s)", cgpu->dev_repr); } static void littlefury_common_error(struct cgpu_info * const dev, const enum dev_reason reason) { for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc) { struct thr_info * const thr = proc->thr[0]; dev_error(proc, reason); inc_hw_errors_only(thr); } } static void littlefury_poll(struct thr_info * const master_thr) { struct cgpu_info * const dev = master_thr->cgpu, *proc; int fd = dev->device_fd; if (unlikely(fd == -1)) { uint8_t buf[1]; uint16_t bufsz = 1; fd = serial_open(dev->device_path, 0, 10, true); if (unlikely(fd == -1)) { applog(LOG_ERR, "%s: Failed to open %s", dev->dev_repr, dev->device_path); littlefury_common_error(dev, REASON_THREAD_FAIL_INIT); return; } if (!(bitfury_do_packet(LOG_DEBUG, littlefury_drv.dname, fd, buf, &bufsz, LFOP_REGPWR, "\1", 1) && bufsz && buf[0])) { applog(LOG_ERR, "%s: Unable to power on chip(s)", dev->dev_repr); serial_close(fd); littlefury_common_error(dev, REASON_THREAD_FAIL_INIT); return; } dev->device_fd = fd; for (proc = dev; proc; proc = proc->next_proc) { if (proc->deven != DEV_ENABLED || proc->thr[0]->pause) continue; struct bitfury_device * const bitfury = proc->device_data; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_chip(proc); } } return bitfury_do_io(master_thr); } static void littlefury_reinit(struct cgpu_info * const proc) { timer_set_now(&proc->thr[0]->tv_poll); } struct device_drv littlefury_drv = { .dname = "littlefury", .name = "LFY", .lowl_match = littlefury_lowl_match, .lowl_probe = littlefury_lowl_probe, .thread_init = littlefury_thread_init, .thread_disable = littlefury_disable, .thread_enable = littlefury_enable, .reinit_device = littlefury_reinit, .thread_shutdown = littlefury_shutdown, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = littlefury_poll, .job_process_results = bitfury_job_process_results, .get_api_extra_device_detail = bitfury_api_device_detail, .get_api_extra_device_status = bitfury_api_device_status, .set_device = bitfury_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-metabank.c000066400000000000000000000141761226556647300204230ustar00rootroot00000000000000/* * Copyright 2013 bitfury * Copyright 2013 Anatoly Legkodymov * Copyright 2013 Luke Dashjr * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including 
without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config.h" #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "spidevc.h" #include "tm_i2c.h" BFG_REGISTER_DRIVER(metabank_drv) static bool metabank_spi_txrx(struct spi_port *port) { static int current_slot = -1; struct cgpu_info * const proc = port->cgpu; struct bitfury_device * const bitfury = proc->device_data; if (current_slot != bitfury->slot) { if (current_slot != -1) tm_i2c_clear_oe(current_slot); tm_i2c_set_oe(bitfury->slot); current_slot = bitfury->slot; } const bool rv = sys_spi_txrx(port); return rv; } static int metabank_autodetect() { RUNONCE(0); struct cgpu_info *cgpu = NULL, *proc1 = NULL, *prev_cgpu = NULL; struct bitfury_device **devicelist, *bitfury; struct spi_port *port; int i, j; int proc_count = 0; bool slot_on[32]; struct bitfury_device dummy_bitfury; struct cgpu_info dummy_cgpu; applog(LOG_INFO, "INFO: bitfury_detect"); spi_init(); if (!sys_spi) return 0; if (tm_i2c_init() < 0) { applog(LOG_DEBUG, "%s: I2C init error", metabank_drv.dname); return 0; } dummy_cgpu.device_data = &dummy_bitfury; for (i = 0; i < 32; i++) { slot_on[i] = 0; } for (i = 0; i < 32; i++) { int slot_detected = tm_i2c_detect(i) != -1; slot_on[i] = slot_detected; tm_i2c_clear_oe(i); cgsleep_ms(1); } for (i = 0; i < 32; i++) { if (slot_on[i]) { int chip_n; port = malloc(sizeof(*port)); *port = *sys_spi; port->cgpu = &dummy_cgpu; port->txrx = metabank_spi_txrx; dummy_bitfury.slot = i; chip_n = libbitfury_detectChips1(port); if (chip_n) { applog(LOG_WARNING, "BITFURY slot %d: %d chips detected", i, chip_n); devicelist = malloc(sizeof(*devicelist) * chip_n); for (j = 0; j < chip_n; ++j) { devicelist[j] = bitfury = malloc(sizeof(*bitfury)); *bitfury = (struct bitfury_device){ .spi = port, .slot = i, .fasync = j, }; } cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &metabank_drv, .procs = chip_n, .device_data = devicelist, .cutofftemp = 50, }; add_cgpu_slave(cgpu, prev_cgpu); proc_count += chip_n; if (!proc1) proc1 = cgpu; prev_cgpu = cgpu; } else free(port); } } if (proc1) proc1->threads = 1; return proc_count; } static void metabank_detect(void) { noserial_detect_manual(&metabank_drv, metabank_autodetect); } static bool metabank_init(struct thr_info *thr) { struct bitfury_device **devicelist; struct cgpu_info *proc; struct bitfury_device *bitfury; for (proc = thr->cgpu; proc; proc = proc->next_proc) { devicelist = proc->device_data; bitfury = devicelist[proc->proc_id]; proc->device_data = bitfury; bitfury->spi->cgpu = proc; bitfury_init_chip(proc); bitfury->osc6_bits = 53; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); 
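/* The call below seeds per-chip frequency statistics; the 52 and 56 arguments
 * presumably bound the osc6_bits range being tracked, bracketing the default
 * of 53 set above. */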
bitfury_init_freq_stat(&bitfury->chip_stat, 52, 56); if (proc->proc_id == proc->procs - 1) free(devicelist); } timer_set_now(&thr->tv_poll); return true; } static void metabank_shutdown(struct thr_info *thr) { bitfury_shutdown(thr); tm_i2c_close(); } static bool metabank_get_stats(struct cgpu_info *cgpu) { struct bitfury_device * const bitfury = cgpu->device_data; float t; t = tm_i2c_gettemp(bitfury->slot) * 0.1; if (t < -27) //Sometimes tm_i2c_gettemp() returns strange result, ignoring it. return false; cgpu->temp = t; return true; } static struct api_data *metabank_api_extra_device_detail(struct cgpu_info *cgpu) { struct api_data *root = NULL; struct bitfury_device * const bitfury = cgpu->device_data; root = bitfury_api_device_detail(cgpu); root = api_add_uint(root, "Slot", &(bitfury->slot), false); return root; } static struct api_data *metabank_api_extra_device_status(struct cgpu_info *cgpu) { struct api_data *root = NULL; float vc0, vc1; struct bitfury_device * const bitfury = cgpu->device_data; root = bitfury_api_device_status(cgpu); vc0 = tm_i2c_getcore0(bitfury->slot); vc1 = tm_i2c_getcore1(bitfury->slot); root = api_add_volts(root, "Slot VC0", &vc0, true); root = api_add_volts(root, "Slot VC1", &vc1, true); return root; } struct device_drv metabank_drv = { .dname = "metabank", .name = "MBF", .drv_detect = metabank_detect, .thread_init = metabank_init, .thread_enable = bitfury_enable, .thread_disable = bitfury_disable, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = bitfury_do_io, .job_process_results = bitfury_job_process_results, .thread_shutdown = metabank_shutdown, .get_api_extra_device_detail = metabank_api_extra_device_detail, .get_api_extra_device_status = metabank_api_extra_device_status, .get_stats = metabank_get_stats, .set_device = bitfury_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-modminer.c000066400000000000000000000606401226556647300204500ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include "binloader.h" #include "compat.h" #include "dynclock.h" #include "logging.h" #include "miner.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "util.h" #define BITSTREAM_FILENAME "fpgaminer_x6500-overclocker-0402.bit" #define BISTREAM_USER_ID "\2\4$B" #define MODMINER_MAX_CLOCK 250 #define MODMINER_DEF_CLOCK 190 #define MODMINER_MIN_CLOCK 2 // Commands #define MODMINER_PING "\x00" #define MODMINER_GET_VERSION "\x01" #define MODMINER_FPGA_COUNT "\x02" // Commands + require FPGAid #define MODMINER_GET_IDCODE '\x03' #define MODMINER_GET_USERCODE '\x04' #define MODMINER_PROGRAM '\x05' #define MODMINER_SET_CLOCK '\x06' #define MODMINER_READ_CLOCK '\x07' #define MODMINER_SEND_WORK '\x08' #define MODMINER_CHECK_WORK '\x09' // One byte temperature reply #define MODMINER_TEMP1 '\x0a' #define FPGAID_ALL 4 BFG_REGISTER_DRIVER(modminer_drv) struct modminer_fpga_state { bool work_running; struct work running_work; struct work last_work; struct timeval tv_workstart; uint32_t hashes; char next_work_cmd[46]; struct dclk_data dclk; uint8_t freqMaxMaxM; // Number of nonces didn't meet pdiff 1, ever int bad_share_counter; // Number of nonces did meet pdiff 1, ever int good_share_counter; // Time the clock was last reduced due to temperature struct timeval tv_last_cutoff_reduced; unsigned char temp; unsigned char pdone; }; static inline bool _bailout(int fd, struct cgpu_info*modminer, int prio, const char *fmt, ...) FORMAT_SYNTAX_CHECK(printf, 4, 5); static inline bool _bailout(int fd, struct cgpu_info*modminer, int prio, const char *fmt, ...) { if (fd != -1) serial_close(fd); if (modminer) { pthread_mutex_t *mutexp = &modminer->device->device_mutex; modminer->device->device_fd = -1; mutex_unlock(mutexp); } va_list ap; char buf[LOGBUFSIZ]; va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); _applog(prio, buf); return false; } #define bailout(...) 
return _bailout(fd, NULL, __VA_ARGS__); // 45 noops sent when detecting, in case the device was left in "start job" reading static const char NOOP[] = MODMINER_PING "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"; static bool modminer_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "ModMiner"); } static bool modminer_detect_one(const char *devpath) { int fd = serial_open(devpath, 0, 10, true); if (unlikely(fd == -1)) bailout(LOG_DEBUG, "ModMiner detect: failed to open %s", devpath); char buf[0x100]; ssize_t len; // Sending a "ping" first, to workaround bug in new firmware betas (see issue #62) // Sending 45 noops, just in case the device was left in "start job" reading (void)(write(fd, NOOP, sizeof(NOOP)) ?:0); while (serial_read(fd, buf, sizeof(buf)) > 0) ; if (1 != write(fd, MODMINER_GET_VERSION, 1)) bailout(LOG_DEBUG, "ModMiner detect: write failed on %s (get version)", devpath); len = serial_read(fd, buf, sizeof(buf)-1); if (len < 1) bailout(LOG_DEBUG, "ModMiner detect: no response to version request from %s", devpath); buf[len] = '\0'; if (strncasecmp(buf, "ModMiner", 8)) bailout(LOG_DEBUG, "%s: %s: response did not begin with 'ModMiner'", __func__, devpath); char*devname = strdup(buf); applog(LOG_DEBUG, "ModMiner identified as: %s", devname); if (serial_claim_v(devpath, &modminer_drv)) { serial_close(fd); return false; } if (1 != write(fd, MODMINER_FPGA_COUNT, 1)) bailout(LOG_DEBUG, "ModMiner detect: write failed on %s (get FPGA count)", devpath); len = read(fd, buf, 1); if (len < 1) bailout(LOG_ERR, "ModMiner detect: timeout waiting for FPGA count from %s", devpath); if (!buf[0]) bailout(LOG_ERR, "ModMiner detect: zero FPGAs reported on %s", devpath); applog(LOG_DEBUG, "ModMiner %s has %u FPGAs", devname, buf[0]); serial_close(fd); struct cgpu_info *modminer; modminer = calloc(1, sizeof(*modminer)); modminer->drv = &modminer_drv; mutex_init(&modminer->device_mutex); modminer->device_path = strdup(devpath); modminer->device_fd = -1; modminer->deven = DEV_ENABLED; modminer->procs = buf[0]; modminer->threads = buf[0]; modminer->name = devname; modminer->cutofftemp = 85; return add_cgpu(modminer); } #undef bailout static bool modminer_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, modminer_detect_one); } #define bailout(...) return _bailout(-1, modminer, __VA_ARGS__); #define bailout2(...) return _bailout(fd, modminer, __VA_ARGS__); #define bailout3(...) _bailout(fd, modminer, __VA_ARGS__); static bool modminer_reopen(struct cgpu_info*modminer) { serial_close(modminer->device->device_fd); int fd = serial_open(modminer->device_path, 0, 10, true); if (unlikely(-1 == fd)) { applog(LOG_ERR, "%s: Failed to reopen %s", modminer->dev_repr, modminer->device_path); return false; } modminer->device->device_fd = fd; return true; } #define safebailout() do { \ bool _safebailoutrv; \ state->work_running = false; \ _safebailoutrv = modminer_reopen(modminer); \ mutex_unlock(mutexp); \ return _safebailoutrv ? 
0 : -1; \ } while(0) #define check_magic(L) do { \ if (1 != fread(buf, 1, 1, f)) \ bailout(LOG_ERR, "Error reading ModMiner bitstream ('%c')", L); \ if (buf[0] != L) \ bailout(LOG_ERR, "ModMiner bitstream has wrong magic ('%c')", L); \ } while(0) #define read_str(eng) do { \ if (1 != fread(buf, 2, 1, f)) \ bailout(LOG_ERR, "Error reading ModMiner bitstream (" eng " len)"); \ len = (ubuf[0] << 8) | ubuf[1]; \ if (len >= sizeof(buf)) \ bailout(LOG_ERR, "ModMiner bitstream " eng " too long"); \ if (1 != fread(buf, len, 1, f)) \ bailout(LOG_ERR, "Error reading ModMiner bitstream (" eng ")"); \ buf[len] = '\0'; \ } while(0) #define status_read(eng) do { \ FD_ZERO(&fds); \ FD_SET(fd, &fds); \ select(fd+1, &fds, NULL, NULL, NULL); \ if (1 != read(fd, buf, 1)) \ bailout2(LOG_ERR, "%s: Error programming %s (" eng ")", modminer->dev_repr, modminer->device_path); \ if (buf[0] != 1) \ bailout2(LOG_ERR, "%s: Wrong " eng " programming %s", modminer->dev_repr, modminer->device_path); \ } while(0) static bool modminer_fpga_upload_bitstream(struct cgpu_info*modminer) { struct modminer_fpga_state *state = modminer->thr[0]->cgpu_data; fd_set fds; char buf[0x100]; unsigned long len, flen; char fpgaid = FPGAID_ALL; FILE *f = open_xilinx_bitstream(modminer->drv->dname, modminer->dev_repr, BITSTREAM_FILENAME, &len); if (!f) return false; flen = len; int fd = modminer->device->device_fd; applog(LOG_WARNING, "%s: Programming %s... DO NOT EXIT UNTIL COMPLETE", modminer->dev_repr, modminer->device_path); buf[0] = MODMINER_PROGRAM; buf[1] = fpgaid; buf[2] = (len >> 0) & 0xff; buf[3] = (len >> 8) & 0xff; buf[4] = (len >> 16) & 0xff; buf[5] = (len >> 24) & 0xff; if (6 != write(fd, buf, 6)) bailout2(LOG_ERR, "%s: Error programming %s (cmd)", modminer->dev_repr, modminer->device_path); status_read("cmd reply"); ssize_t buflen; char nextstatus = 10; while (len) { buflen = len < 32 ? len : 32; if (fread(buf, buflen, 1, f) != 1) bailout2(LOG_ERR, "%s: File underrun programming %s (%lu bytes left)", modminer->dev_repr, modminer->device_path, len); if (write(fd, buf, buflen) != buflen) bailout2(LOG_ERR, "%s: Error programming %s (data)", modminer->dev_repr, modminer->device_path); state->pdone = 100 - ((len * 100) / flen); if (state->pdone >= nextstatus) { nextstatus += 10; applog(LOG_WARNING, "%s: Programming %s... 
%d%% complete...", modminer->dev_repr, modminer->device_path, state->pdone); } status_read("status"); len -= buflen; } status_read("final status"); applog(LOG_WARNING, "%s: Done programming %s", modminer->dev_repr, modminer->device_path); return true; } static bool modminer_device_prepare(struct cgpu_info *modminer) { int fd = serial_open(modminer->device_path, 0, 10, true); if (unlikely(-1 == fd)) bailout(LOG_ERR, "%s: Failed to open %s", modminer->dev_repr, modminer->device_path); modminer->device->device_fd = fd; applog(LOG_INFO, "%s: Opened %s", modminer->dev_repr, modminer->device_path); return true; } #undef bailout static bool modminer_fpga_prepare(struct thr_info *thr) { struct cgpu_info *proc = thr->cgpu; struct cgpu_info *modminer = proc->device; // Don't need to lock the mutex here, since prepare runs from the main thread before the miner threads start if (modminer->device->device_fd == -1 && !modminer_device_prepare(modminer)) return false; struct modminer_fpga_state *state; state = thr->cgpu_data = calloc(1, sizeof(struct modminer_fpga_state)); dclk_prepare(&state->dclk); state->dclk.freqMinM = MODMINER_MIN_CLOCK / 2; state->next_work_cmd[0] = MODMINER_SEND_WORK; state->next_work_cmd[1] = proc->proc_id; // FPGA id proc->status = LIFE_INIT2; return true; } static bool modminer_change_clock(struct thr_info*thr, bool needlock, signed char delta) { struct cgpu_info*modminer = thr->cgpu; struct modminer_fpga_state *state = thr->cgpu_data; char fpgaid = modminer->proc_id; pthread_mutex_t *mutexp = &modminer->device->device_mutex; int fd; unsigned char cmd[6], buf[1]; unsigned char clk; clk = (state->dclk.freqM * 2) + delta; cmd[0] = MODMINER_SET_CLOCK; cmd[1] = fpgaid; cmd[2] = clk; cmd[3] = cmd[4] = cmd[5] = '\0'; if (needlock) mutex_lock(mutexp); fd = modminer->device->device_fd; if (6 != write(fd, cmd, 6)) bailout2(LOG_ERR, "%s: Error writing (set frequency)", modminer->proc_repr); if (serial_read(fd, &buf, 1) != 1) bailout2(LOG_ERR, "%s: Error reading (set frequency)", modminer->proc_repr); if (needlock) mutex_unlock(mutexp); if (buf[0]) state->dclk.freqM = clk / 2; else return false; return true; } static bool modminer_dclk_change_clock(struct thr_info*thr, int multiplier) { struct cgpu_info *modminer = thr->cgpu; struct modminer_fpga_state *state = thr->cgpu_data; uint8_t oldFreq = state->dclk.freqM; signed char delta = (multiplier - oldFreq) * 2; if (unlikely(!modminer_change_clock(thr, true, delta))) return false; dclk_msg_freqchange(modminer->proc_repr, oldFreq * 2, state->dclk.freqM * 2, NULL); return true; } static bool modminer_reduce_clock(struct thr_info*thr, bool needlock) { struct modminer_fpga_state *state = thr->cgpu_data; if (state->dclk.freqM <= MODMINER_MIN_CLOCK / 2) return false; return modminer_change_clock(thr, needlock, -2); } static bool _modminer_get_nonce(struct cgpu_info*modminer, char fpgaid, uint32_t*nonce) { int fd = modminer->device->device_fd; char cmd[2] = {MODMINER_CHECK_WORK, fpgaid}; if (write(fd, cmd, 2) != 2) { applog(LOG_ERR, "%s: Error writing (get nonce)", modminer->proc_repr); return false; } if (4 != serial_read(fd, nonce, 4)) { applog(LOG_ERR, "%s: Short read (get nonce)", modminer->proc_repr); return false; } return true; } static bool modminer_fpga_init(struct thr_info *thr) { struct cgpu_info *modminer = thr->cgpu; struct modminer_fpga_state *state = thr->cgpu_data; int fd; char fpgaid = modminer->proc_id; pthread_mutex_t *mutexp = &modminer->device->device_mutex; uint32_t nonce; unsigned char cmd[2], buf[4]; mutex_lock(mutexp); fd = 
modminer->device->device_fd; if (fd == -1) { // Died in another thread... mutex_unlock(mutexp); return false; } cmd[0] = MODMINER_GET_USERCODE; cmd[1] = fpgaid; if (write(fd, cmd, 2) != 2) bailout2(LOG_ERR, "%s: Error writing (read USER code)", modminer->proc_repr); if (serial_read(fd, buf, 4) != 4) bailout2(LOG_ERR, "%s: Error reading (read USER code)", modminer->proc_repr); if (memcmp(buf, BISTREAM_USER_ID, 4)) { applog(LOG_ERR, "%s: FPGA not programmed", modminer->proc_repr); if (!modminer_fpga_upload_bitstream(modminer)) return false; } else if (opt_force_dev_init && !((struct modminer_fpga_state *)modminer->device->thr[0]->cgpu_data)->pdone) { applog(LOG_DEBUG, "%s: FPGA is already programmed, but --force-dev-init is set", modminer->proc_repr); if (!modminer_fpga_upload_bitstream(modminer)) return false; } else applog(LOG_DEBUG, "%s: FPGA is already programmed :)", modminer->proc_repr); state->pdone = 101; state->dclk.freqM = MODMINER_MAX_CLOCK / 2 + 1; // Will be reduced immediately while (1) { if (state->dclk.freqM <= MODMINER_MIN_CLOCK / 2) bailout2(LOG_ERR, "%s: Hit minimum trying to find acceptable frequencies", modminer->proc_repr); --state->dclk.freqM; if (!modminer_change_clock(thr, false, 0)) // MCU rejected assignment continue; if (!_modminer_get_nonce(modminer, fpgaid, &nonce)) bailout2(LOG_ERR, "%s: Error detecting acceptable frequencies", modminer->proc_repr); if (!memcmp(&nonce, "\x00\xff\xff\xff", 4)) // MCU took assignment, but disabled FPGA continue; break; } state->freqMaxMaxM = state->dclk.freqMaxM = state->dclk.freqM; if (MODMINER_DEF_CLOCK / 2 < state->dclk.freqM) { if (!modminer_change_clock(thr, false, -(state->dclk.freqM * 2 - MODMINER_DEF_CLOCK))) applog(LOG_WARNING, "%s: Failed to set desired initial frequency of %u", modminer->proc_repr, MODMINER_DEF_CLOCK); } state->dclk.freqMDefault = state->dclk.freqM; applog(LOG_WARNING, "%s: Frequency set to %u MHz (range: %u-%u)", modminer->proc_repr, state->dclk.freqM * 2, MODMINER_MIN_CLOCK, state->dclk.freqMaxM * 2); mutex_unlock(mutexp); thr->primary_thread = true; return true; } static bool get_modminer_upload_percent(char *buf, size_t bufsz, struct cgpu_info *modminer, __maybe_unused bool per_processor) { char pdone = ((struct modminer_fpga_state*)(modminer->device->thr[0]->cgpu_data))->pdone; if (pdone != 101) { tailsprintf(buf, bufsz, "%3d%% ", pdone); return true; } return false; } static void modminer_get_temperature(struct cgpu_info *modminer, struct thr_info *thr) { struct modminer_fpga_state *state = thr->cgpu_data; #ifdef WIN32 /* Workaround for bug in Windows driver */ if (!modminer_reopen(modminer)) return; #endif int fd = modminer->device->device_fd; int fpgaid = modminer->proc_id; char cmd[2] = {MODMINER_TEMP1, fpgaid}; char temperature; if (2 == write(fd, cmd, 2) && read(fd, &temperature, 1) == 1) { state->temp = temperature; if (temperature > modminer->targettemp + opt_hysteresis) { { struct timeval now; cgtime(&now); if (timer_elapsed(&state->tv_last_cutoff_reduced, &now)) { state->tv_last_cutoff_reduced = now; int oldFreq = state->dclk.freqM; if (modminer_reduce_clock(thr, false)) applog(LOG_NOTICE, "%s: Frequency %s from %u to %u MHz (temp: %d)", modminer->proc_repr, (oldFreq > state->dclk.freqM ? 
"dropped" : "raised "), oldFreq * 2, state->dclk.freqM * 2, temperature ); state->dclk.freqMaxM = state->dclk.freqM; } } } else if (state->dclk.freqMaxM < state->freqMaxMaxM && temperature < modminer->targettemp) { if (temperature < modminer->targettemp - opt_hysteresis) { state->dclk.freqMaxM = state->freqMaxMaxM; } else { ++state->dclk.freqMaxM; } } } } static bool modminer_get_stats(struct cgpu_info *modminer) { pthread_mutex_t *mutexp = &modminer->device->device_mutex; int hottest = 0; bool get_temp = (modminer->deven != DEV_ENABLED); // Getting temperature more efficiently while enabled for (int i = modminer->threads; i--; ) { struct thr_info*thr = modminer->thr[i]; struct modminer_fpga_state *state = thr->cgpu_data; if (get_temp) { mutex_lock(mutexp); modminer_get_temperature(modminer, thr); mutex_unlock(mutexp); } int temp = state->temp; if (temp > hottest) hottest = temp; } modminer->temp = (float)hottest; return true; } static struct api_data* get_modminer_drv_extra_device_status(struct cgpu_info*modminer) { struct api_data*root = NULL; struct thr_info*thr = modminer->thr[0]; struct modminer_fpga_state *state = thr->cgpu_data; double d; d = (double)state->dclk.freqM * 2; root = api_add_freq(root, "Frequency", &d, true); d = (double)state->dclk.freqMaxM * 2; root = api_add_freq(root, "Cool Max Frequency", &d, true); d = (double)state->freqMaxMaxM * 2; root = api_add_freq(root, "Max Frequency", &d, true); root = api_add_int(root, "Hardware Errors", &state->bad_share_counter, true); root = api_add_int(root, "Valid Nonces", &state->good_share_counter, true); return root; } static bool modminer_prepare_next_work(struct modminer_fpga_state*state, struct work*work) { char *midstate = state->next_work_cmd + 2; char *taildata = midstate + 32; if (!(memcmp(midstate, work->midstate, 32) || memcmp(taildata, work->data + 64, 12))) return false; memcpy(midstate, work->midstate, 32); memcpy(taildata, work->data + 64, 12); return true; } static bool modminer_start_work(struct thr_info*thr) { fd_set fds; struct cgpu_info*modminer = thr->cgpu; struct modminer_fpga_state *state = thr->cgpu_data; pthread_mutex_t *mutexp = &modminer->device->device_mutex; int fd; char buf[1]; mutex_lock(mutexp); fd = modminer->device->device_fd; if (unlikely(fd == -1)) { if (!modminer_reopen(modminer)) { mutex_unlock(mutexp); return false; } fd = modminer->device->device_fd; } if (46 != write(fd, state->next_work_cmd, 46)) bailout2(LOG_ERR, "%s: Error writing (start work)", modminer->proc_repr); timer_set_now(&state->tv_workstart); state->hashes = 0; status_read("start work"); mutex_unlock(mutexp); if (opt_debug) { char xdata[161]; bin2hex(xdata, state->running_work.data, 80); applog(LOG_DEBUG, "%s: Started work: %s", modminer->proc_repr, xdata); } return true; } #define work_restart(thr) thr->work_restart #define NONCE_CHARS(nonce) \ (int)((unsigned char*)&nonce)[3], \ (int)((unsigned char*)&nonce)[2], \ (int)((unsigned char*)&nonce)[1], \ (int)((unsigned char*)&nonce)[0] static int64_t modminer_process_results(struct thr_info*thr) { struct cgpu_info*modminer = thr->cgpu; struct modminer_fpga_state *state = thr->cgpu_data; char fpgaid = modminer->proc_id; pthread_mutex_t *mutexp = &modminer->device->device_mutex; struct work *work = &state->running_work; uint32_t nonce; long iter; int immediate_bad_nonces = 0, immediate_nonces = 0; bool bad; mutex_lock(mutexp); modminer_get_temperature(modminer, thr); iter = 200; while (1) { if (!_modminer_get_nonce(modminer, fpgaid, &nonce)) safebailout(); mutex_unlock(mutexp); if 
(memcmp(&nonce, "\xff\xff\xff\xff", 4)) { nonce = le32toh(nonce); bad = !test_nonce(work, nonce, false); ++immediate_nonces; if (!bad) applog(LOG_DEBUG, "%s: Nonce for current work: %02x%02x%02x%02x", modminer->proc_repr, NONCE_CHARS(nonce)); else if (test_nonce(&state->last_work, nonce, false)) { applog(LOG_DEBUG, "%s: Nonce for previous work: %02x%02x%02x%02x", modminer->proc_repr, NONCE_CHARS(nonce)); work = &state->last_work; bad = false; } if (!bad) { ++state->good_share_counter; submit_nonce(thr, work, nonce); } else { inc_hw_errors(thr, work, nonce); ++state->bad_share_counter; ++immediate_bad_nonces; } } if (work_restart(thr) || !--iter) break; cgsleep_ms(1); if (work_restart(thr)) break; mutex_lock(mutexp); } struct timeval tv_workend, elapsed; timer_set_now(&tv_workend); timersub(&tv_workend, &state->tv_workstart, &elapsed); uint64_t hashes = (uint64_t)state->dclk.freqM * 2 * (((uint64_t)elapsed.tv_sec * 1000000) + elapsed.tv_usec); if (hashes > 0xffffffff) { applog(LOG_WARNING, "%s: Finished work before new one sent", modminer->proc_repr); hashes = 0xffffffff; } if (hashes <= state->hashes) hashes = 1; else hashes -= state->hashes; state->hashes += hashes; dclk_gotNonces(&state->dclk); if (immediate_bad_nonces) dclk_errorCount(&state->dclk, ((double)immediate_bad_nonces) / (double)immediate_nonces); dclk_preUpdate(&state->dclk); if (!dclk_updateFreq(&state->dclk, modminer_dclk_change_clock, thr)) return -1; return hashes; } static int64_t modminer_scanhash(struct thr_info*thr, struct work*work, int64_t __maybe_unused max_nonce) { struct modminer_fpga_state *state = thr->cgpu_data; int64_t hashes = 0; bool startwork; startwork = modminer_prepare_next_work(state, work); if (startwork) { /* HACK: For some reason, this is delayed a bit * Let last_work handle the end of the work, * and start the next one immediately */ } else if (state->work_running) { hashes = modminer_process_results(thr); if (work_restart(thr)) { state->work_running = false; return hashes; } } else state->work_running = true; if (startwork) { __copy_work(&state->last_work, &state->running_work); __copy_work(&state->running_work, work); if (!modminer_start_work(thr)) return -1; } // This is intentionally early work->blk.nonce += hashes; return hashes; } static void modminer_fpga_shutdown(struct thr_info *thr) { for (struct cgpu_info *proc = thr->cgpu->device; proc; proc = proc->next_proc) proc->status = LIFE_DEAD2; free(thr->cgpu_data); thr->cgpu_data = NULL; } static bool modminer_user_set_clock(struct cgpu_info *cgpu, const int val) { struct thr_info * const thr = cgpu->thr[0]; struct modminer_fpga_state * const state = thr->cgpu_data; const int multiplier = val / 2; const uint8_t oldFreqM = state->dclk.freqM; const signed char delta = (multiplier - oldFreqM) * 2; state->dclk.freqMDefault = multiplier; const bool rv = modminer_change_clock(thr, true, delta); if (likely(rv)) dclk_msg_freqchange(cgpu->proc_repr, oldFreqM * 2, state->dclk.freqM * 2, " on user request"); return rv; } static char *modminer_set_device(struct cgpu_info *modminer, char *option, char *setting, char *replybuf) { int val; if (strcasecmp(option, "help") == 0) { sprintf(replybuf, "clock: range %d-%d and a multiple of 2", MODMINER_MIN_CLOCK, MODMINER_MAX_CLOCK); return replybuf; } if (strcasecmp(option, "clock") == 0) { if (!setting || !*setting) { sprintf(replybuf, "missing clock setting"); return replybuf; } val = atoi(setting); if (val < MODMINER_MIN_CLOCK || val > MODMINER_MAX_CLOCK || (val & 1) != 0) { sprintf(replybuf, "invalid clock: 
'%s' valid range %d-%d and a multiple of 2", setting, MODMINER_MIN_CLOCK, MODMINER_MAX_CLOCK); return replybuf; } if (unlikely(!modminer_user_set_clock(modminer, val))) { sprintf(replybuf, "Set clock failed: %s", modminer->proc_repr); return replybuf; } return NULL; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } #ifdef HAVE_CURSES static void modminer_tui_wlogprint_choices(struct cgpu_info *cgpu) { wlogprint("[C]lock speed "); } static const char *modminer_tui_handle_choice(struct cgpu_info *cgpu, int input) { static char buf[0x100]; // Static for replies switch (input) { case 'c': case 'C': { int val; char *intvar; sprintf(buf, "Set clock speed (range %d-%d, multiple of 2)", MODMINER_MIN_CLOCK, MODMINER_MAX_CLOCK); intvar = curses_input(buf); if (!intvar) return "Invalid clock speed\n"; val = atoi(intvar); free(intvar); if (val < MODMINER_MIN_CLOCK || val > MODMINER_MAX_CLOCK || (val & 1) != 0) return "Invalid clock speed\n"; if (unlikely(!modminer_user_set_clock(cgpu, val))) return "Set clock failed\n"; return "Clock speed changed\n"; } } return NULL; } static void modminer_wlogprint_status(struct cgpu_info *cgpu) { struct modminer_fpga_state *state = cgpu->thr[0]->cgpu_data; wlogprint("Clock speed: %d\n", (int)(state->dclk.freqM * 2)); } #endif struct device_drv modminer_drv = { .dname = "modminer", .name = "MMQ", .lowl_match = modminer_lowl_match, .lowl_probe = modminer_lowl_probe, .override_statline_temp2 = get_modminer_upload_percent, .get_stats = modminer_get_stats, .get_api_extra_device_status = get_modminer_drv_extra_device_status, .set_device = modminer_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = modminer_wlogprint_status, .proc_tui_wlogprint_choices = modminer_tui_wlogprint_choices, .proc_tui_handle_choice = modminer_tui_handle_choice, #endif .thread_prepare = modminer_fpga_prepare, .thread_init = modminer_fpga_init, .scanhash = modminer_scanhash, .thread_shutdown = modminer_fpga_shutdown, }; bfgminer-bfgminer-3.10.0/driver-nanofury.c000066400000000000000000000240751226556647300205010ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2013 Vladimir Strinski * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include "deviceapi.h" #include "driver-bitfury.h" #include "libbitfury.h" #include "logging.h" #include "lowlevel.h" #include "mcp2210.h" #include "miner.h" #include "util.h" #define NANOFURY_USB_PRODUCT "NanoFury" #define NANOFURY_GP_PIN_LED 0 #define NANOFURY_GP_PIN_SCK_OVR 5 #define NANOFURY_GP_PIN_PWR_EN 6 #define NANOFURY_MAX_BYTES_PER_SPI_TRANSFER 60 // due to MCP2210 limitation BFG_REGISTER_DRIVER(nanofury_drv) struct nanofury_state { struct mcp2210_device *mcp; struct timeval identify_started; bool identify_requested; }; // Bit-banging reset, to reset more chips in chain - toggle for longer period... 
Each 3 reset cycles reset first chip in chain static bool nanofury_spi_reset(struct mcp2210_device * const mcp) { int r; char tx[1] = {0x81}; // will send this waveform: - _ _ _ _ _ _ - char buf[1]; // SCK_OVRRIDE if (!mcp2210_set_gpio_output(mcp, NANOFURY_GP_PIN_SCK_OVR, MGV_HIGH)) return false; for (r = 0; r < 16; ++r) if (!mcp2210_spi_transfer(mcp, tx, buf, 1)) return false; if (mcp2210_get_gpio_input(mcp, NANOFURY_GP_PIN_SCK_OVR) == MGV_ERROR) return false; return true; } static void nanofury_device_off(struct mcp2210_device *); static bool nanofury_spi_txrx(struct spi_port * const port) { struct cgpu_info * const cgpu = port->cgpu; struct thr_info * const thr = cgpu->thr[0]; struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; const void *wrbuf = spi_gettxbuf(port); void *rdbuf = spi_getrxbuf(port); size_t bufsz = spi_getbufsz(port); const uint8_t *ptrwrbuf = wrbuf; uint8_t *ptrrdbuf = rdbuf; nanofury_spi_reset(mcp); // start by sending chunks of 60 bytes... while (bufsz >= NANOFURY_MAX_BYTES_PER_SPI_TRANSFER) { if (!mcp2210_spi_transfer(mcp, ptrwrbuf, ptrrdbuf, NANOFURY_MAX_BYTES_PER_SPI_TRANSFER)) goto err; ptrrdbuf += NANOFURY_MAX_BYTES_PER_SPI_TRANSFER; ptrwrbuf += NANOFURY_MAX_BYTES_PER_SPI_TRANSFER; bufsz -= NANOFURY_MAX_BYTES_PER_SPI_TRANSFER; } // send any remaining bytes... if (bufsz > 0) { if (!mcp2210_spi_transfer(mcp, ptrwrbuf, ptrrdbuf, bufsz)) goto err; } return true; err: mcp2210_spi_cancel(mcp); nanofury_device_off(mcp); hashes_done2(thr, -1, NULL); return false; } static void nanofury_device_off(struct mcp2210_device * const mcp) { // Try to reset everything back to input for (int i = 0; i < 9; ++i) mcp2210_get_gpio_input(mcp, i); } static bool nanofury_checkport(struct mcp2210_device * const mcp) { int i; const char tmp = 0; char tmprx; // default: set everything to input for (i = 0; i < 9; ++i) if (MGV_ERROR == mcp2210_get_gpio_input(mcp, i)) goto fail; // configure the pins that we need: // LED if (!mcp2210_set_gpio_output(mcp, NANOFURY_GP_PIN_LED, MGV_HIGH)) goto fail; // PWR_EN if (!mcp2210_set_gpio_output(mcp, NANOFURY_GP_PIN_PWR_EN, MGV_HIGH)) goto fail; // cancel any outstanding SPI transfers mcp2210_spi_cancel(mcp); // configure SPI // This is the only place where speed, mode and other settings are configured!!! 
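/* The sequence below brings the bus up at 200 kHz and then toggles between SPI
 * modes 0 and 2, reading back the SCK_OVRRIDE pin after a dummy transfer each
 * time: with the bus idle the pin should follow the mode's clock polarity
 * (low in mode 0, high in mode 2), which verifies the NanoFury's SPI wiring
 * before any BitFury traffic is attempted. */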
if (!mcp2210_configure_spi(mcp, 200000, 0xffff, 0xffef, 0, 0, 0)) goto fail; if (!mcp2210_set_spimode(mcp, 0)) goto fail; if (!mcp2210_spi_transfer(mcp, &tmp, &tmprx, 1)) goto fail; // after this command SCK_OVRRIDE should read the same as current SCK value (which for mode 0 should be 0) if (mcp2210_get_gpio_input(mcp, NANOFURY_GP_PIN_SCK_OVR) != MGV_LOW) goto fail; // switch SCK to polarity (default SCK=1 in mode 2) if (!mcp2210_set_spimode(mcp, 2)) goto fail; if (!mcp2210_spi_transfer(mcp, &tmp, &tmprx, 1)) goto fail; // after this command SCK_OVRRIDE should read the same as current SCK value (which for mode 2 should be 1) if (mcp2210_get_gpio_input(mcp, NANOFURY_GP_PIN_SCK_OVR) != MGV_HIGH) goto fail; // switch SCK to polarity (default SCK=0 in mode 0) if (!mcp2210_set_spimode(mcp, 0)) goto fail; if (!mcp2210_spi_transfer(mcp, &tmp, &tmprx, 1)) goto fail; if (mcp2210_get_gpio_input(mcp, NANOFURY_GP_PIN_SCK_OVR) != MGV_LOW) goto fail; return true; fail: nanofury_device_off(mcp); return false; } static bool nanofury_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_lowlproduct(info, &lowl_mcp2210, NANOFURY_USB_PRODUCT); } static bool nanofury_lowl_probe(const struct lowlevel_device_info * const info) { const char * const product = info->product; const char * const serial = info->serial; struct mcp2210_device *mcp; if (info->lowl != &lowl_mcp2210) { if (info->lowl != &lowl_hid && info->lowl != &lowl_usb) applog(LOG_DEBUG, "%s: Matched \"%s\" serial \"%s\", but lowlevel driver is not mcp2210!", __func__, product, serial); return false; } mcp = mcp2210_open(info); if (!mcp) { applog(LOG_WARNING, "%s: Matched \"%s\" serial \"%s\", but mcp2210 lowlevel driver failed to open it", __func__, product, serial); return false; } if (!nanofury_checkport(mcp)) { applog(LOG_WARNING, "%s: Matched \"%s\" serial \"%s\", but failed to detect nanofury", __func__, product, serial); mcp2210_close(mcp); return false; } nanofury_device_off(mcp); mcp2210_close(mcp); if (lowlevel_claim(&nanofury_drv, true, info)) return false; struct cgpu_info *cgpu; cgpu = malloc(sizeof(*cgpu)); *cgpu = (struct cgpu_info){ .drv = &nanofury_drv, .device_data = lowlevel_ref(info), .threads = 1, // TODO: .name .device_path = strdup(info->path), .dev_manufacturer = maybe_strdup(info->manufacturer), .dev_product = maybe_strdup(product), .dev_serial = maybe_strdup(serial), .deven = DEV_ENABLED, // TODO: .cutofftemp }; return add_cgpu(cgpu); } static bool nanofury_init(struct thr_info * const thr) { struct cgpu_info * const cgpu = thr->cgpu; struct lowlevel_device_info * const info = cgpu->device_data; struct spi_port *port; struct bitfury_device *bitfury; struct mcp2210_device *mcp; struct nanofury_state *state; mcp = mcp2210_open(info); lowlevel_devinfo_free(info); if (!mcp) { applog(LOG_ERR, "%"PRIpreprv": Failed to open mcp2210 device", cgpu->proc_repr); return false; } if (!nanofury_checkport(mcp)) { applog(LOG_ERR, "%"PRIpreprv": checkport failed", cgpu->proc_repr); mcp2210_close(mcp); return false; } port = malloc(sizeof(*port)); bitfury = malloc(sizeof(*bitfury)); state = malloc(sizeof(*state)); if (!(port && bitfury && state)) { applog(LOG_ERR, "%"PRIpreprv": Failed to allocate structures", cgpu->proc_repr); free(port); free(bitfury); free(state); mcp2210_close(mcp); return false; } /* Be careful, read spidevc.h comments for warnings */ memset(port, 0, sizeof(*port)); port->txrx = nanofury_spi_txrx; port->cgpu = cgpu; port->repr = cgpu->proc_repr; port->logprio = LOG_ERR; *bitfury = (struct 
bitfury_device){ .spi = port, }; *state = (struct nanofury_state){ .mcp = mcp, }; cgpu->device_data = bitfury; thr->cgpu_data = state; bitfury->osc6_bits = 50; bitfury_send_reinit(bitfury->spi, bitfury->slot, bitfury->fasync, bitfury->osc6_bits); bitfury_init_chip(cgpu); timer_set_now(&thr->tv_poll); cgpu->status = LIFE_INIT2; return true; } static void nanofury_disable(struct thr_info * const thr) { struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; bitfury_disable(thr); nanofury_device_off(mcp); } static void nanofury_enable(struct thr_info * const thr) { struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; nanofury_checkport(mcp); bitfury_enable(thr); } static void nanofury_reinit(struct cgpu_info * const cgpu) { struct thr_info * const thr = cgpu->thr[0]; struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; nanofury_device_off(mcp); cgsleep_ms(1); nanofury_enable(thr); } static void nanofury_poll(struct thr_info * const thr) { struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; if (state->identify_requested) { if (!timer_isset(&state->identify_started)) // LED is normally on while mining, so turn it off for identify mcp2210_set_gpio_output(mcp, NANOFURY_GP_PIN_LED, MGV_LOW); timer_set_delay_from_now(&state->identify_started, 5000000); state->identify_requested = false; } bitfury_do_io(thr); if (timer_passed(&state->identify_started, NULL)) { mcp2210_set_gpio_output(mcp, NANOFURY_GP_PIN_LED, MGV_HIGH); timer_unset(&state->identify_started); } } static bool nanofury_identify(struct cgpu_info * const cgpu) { struct nanofury_state * const state = cgpu->thr[0]->cgpu_data; state->identify_requested = true; return true; } static void nanofury_shutdown(struct thr_info * const thr) { struct nanofury_state * const state = thr->cgpu_data; struct mcp2210_device * const mcp = state->mcp; if (mcp) nanofury_device_off(mcp); } struct device_drv nanofury_drv = { .dname = "nanofury", .name = "NFY", .lowl_match = nanofury_lowl_match, .lowl_probe = nanofury_lowl_probe, .thread_init = nanofury_init, .thread_disable = nanofury_disable, .thread_enable = nanofury_enable, .reinit_device = nanofury_reinit, .thread_shutdown = nanofury_shutdown, .minerloop = minerloop_async, .job_prepare = bitfury_job_prepare, .job_start = bitfury_noop_job_start, .poll = nanofury_poll, .job_process_results = bitfury_job_process_results, .get_api_extra_device_detail = bitfury_api_device_detail, .get_api_extra_device_status = bitfury_api_device_status, .set_device = bitfury_set_device, .identify_device = nanofury_identify, #ifdef HAVE_CURSES .proc_wlogprint_status = bitfury_wlogprint_status, .proc_tui_wlogprint_choices = bitfury_tui_wlogprint_choices, .proc_tui_handle_choice = bitfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-opencl.c000066400000000000000000001327661226556647300201270ustar00rootroot00000000000000/* * Copyright 2011-2012 Con Kolivas * Copyright 2011-2013 Luke Dashjr * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #ifdef HAVE_CURSES // Must be before stdbool, since pdcurses typedefs bool :/ #include #endif #ifndef WIN32 #include #else #include #endif #include #include #include #include #ifndef WIN32 #include #endif #include #define OMIT_OPENCL_API #include "compat.h" #include "miner.h" #include "deviceapi.h" #include "driver-opencl.h" #include "findnonce.h" #include "ocl.h" #include "adl.h" #include "util.h" /* TODO: cleanup externals ********************/ #ifdef HAVE_OPENCL /* Platform API */ CL_API_ENTRY cl_int CL_API_CALL (*clGetPlatformIDs)(cl_uint /* num_entries */, cl_platform_id * /* platforms */, cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clGetPlatformInfo)(cl_platform_id /* platform */, cl_platform_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Device APIs */ CL_API_ENTRY cl_int CL_API_CALL (*clGetDeviceIDs)(cl_platform_id /* platform */, cl_device_type /* device_type */, cl_uint /* num_entries */, cl_device_id * /* devices */, cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clGetDeviceInfo)(cl_device_id /* device */, cl_device_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Context APIs */ CL_API_ENTRY cl_context CL_API_CALL (*clCreateContextFromType)(const cl_context_properties * /* properties */, cl_device_type /* device_type */, void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *), void * /* user_data */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clReleaseContext)(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; /* Command Queue APIs */ CL_API_ENTRY cl_command_queue CL_API_CALL (*clCreateCommandQueue)(cl_context /* context */, cl_device_id /* device */, cl_command_queue_properties /* properties */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clReleaseCommandQueue)(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Memory Object APIs */ CL_API_ENTRY cl_mem CL_API_CALL (*clCreateBuffer)(cl_context /* context */, cl_mem_flags /* flags */, size_t /* size */, void * /* host_ptr */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; /* Program Object APIs */ CL_API_ENTRY cl_program CL_API_CALL (*clCreateProgramWithSource)(cl_context /* context */, cl_uint /* count */, const char ** /* strings */, const size_t * /* lengths */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_program CL_API_CALL (*clCreateProgramWithBinary)(cl_context /* context */, cl_uint /* num_devices */, const cl_device_id * /* device_list */, const size_t * /* lengths */, const unsigned char ** /* binaries */, cl_int * /* binary_status */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clReleaseProgram)(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clBuildProgram)(cl_program /* program */, cl_uint /* num_devices */, const cl_device_id * /* device_list */, const char * /* options */, void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), void * /* user_data */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clGetProgramInfo)(cl_program /* program */, cl_program_info /* param_name */, size_t /* param_value_size 
*/, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clGetProgramBuildInfo)(cl_program /* program */, cl_device_id /* device */, cl_program_build_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Kernel Object APIs */ CL_API_ENTRY cl_kernel CL_API_CALL (*clCreateKernel)(cl_program /* program */, const char * /* kernel_name */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clReleaseKernel)(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clSetKernelArg)(cl_kernel /* kernel */, cl_uint /* arg_index */, size_t /* arg_size */, const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0; /* Flush and Finish APIs */ CL_API_ENTRY cl_int CL_API_CALL (*clFinish)(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Enqueued Commands APIs */ CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueReadBuffer)(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_read */, size_t /* offset */, size_t /* size */, void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueWriteBuffer)(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_write */, size_t /* offset */, size_t /* size */, const void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueNDRangeKernel)(cl_command_queue /* command_queue */, cl_kernel /* kernel */, cl_uint /* work_dim */, const size_t * /* global_work_offset */, const size_t * /* global_work_size */, const size_t * /* local_work_size */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; #ifdef WIN32 #define dlsym (void*)GetProcAddress #define dlclose FreeLibrary #endif #define LOAD_OCL_SYM(sym) do { \ if (!(sym = dlsym(cl, #sym))) { \ applog(LOG_ERR, "Failed to load OpenCL symbol " #sym ", no GPUs usable"); \ dlclose(cl); \ return false; \ } \ } while(0) static bool load_opencl_symbols() { #if defined(__APPLE__) void *cl = dlopen("/System/Library/Frameworks/OpenCL.framework/Versions/Current/OpenCL", RTLD_LAZY); #elif !defined(WIN32) void *cl = dlopen("libOpenCL.so", RTLD_LAZY); #else HMODULE cl = LoadLibrary("OpenCL.dll"); #endif if (!cl) { applog(LOG_ERR, "Failed to load OpenCL library, no GPUs usable"); return false; } LOAD_OCL_SYM(clGetPlatformIDs); LOAD_OCL_SYM(clGetPlatformInfo); LOAD_OCL_SYM(clGetDeviceIDs); LOAD_OCL_SYM(clGetDeviceInfo); LOAD_OCL_SYM(clCreateContextFromType); LOAD_OCL_SYM(clReleaseContext); LOAD_OCL_SYM(clCreateCommandQueue); LOAD_OCL_SYM(clReleaseCommandQueue); LOAD_OCL_SYM(clCreateBuffer); LOAD_OCL_SYM(clCreateProgramWithSource); LOAD_OCL_SYM(clCreateProgramWithBinary); LOAD_OCL_SYM(clReleaseProgram); LOAD_OCL_SYM(clBuildProgram); LOAD_OCL_SYM(clGetProgramInfo); LOAD_OCL_SYM(clGetProgramBuildInfo); LOAD_OCL_SYM(clCreateKernel); LOAD_OCL_SYM(clReleaseKernel); LOAD_OCL_SYM(clSetKernelArg); LOAD_OCL_SYM(clFinish); LOAD_OCL_SYM(clEnqueueReadBuffer); LOAD_OCL_SYM(clEnqueueWriteBuffer); LOAD_OCL_SYM(clEnqueueNDRangeKernel); return true; } #endif #ifdef HAVE_CURSES extern WINDOW *mainwin, *statuswin, *logwin; extern 
void enable_curses(void); #endif extern int mining_threads; extern int opt_g_threads; extern bool ping; extern bool opt_loginput; extern char *opt_kernel_path; extern int gpur_thr_id; extern bool opt_noadl; extern bool have_opencl; extern void *miner_thread(void *userdata); extern int dev_from_id(int thr_id); extern void decay_time(double *f, double fadd); /**********************************************/ #ifdef HAVE_ADL extern float gpu_temp(int gpu); extern int gpu_fanspeed(int gpu); extern int gpu_fanpercent(int gpu); #endif #ifdef HAVE_SENSORS #include struct opencl_device_data { const sensors_chip_name *sensor; }; #endif #ifdef HAVE_OPENCL char *set_vector(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set vector"; val = atoi(nextptr); if (val != 1 && val != 2 && val != 4) return "Invalid value passed to set_vector"; gpus[device++].vwidth = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val != 1 && val != 2 && val != 4) return "Invalid value passed to set_vector"; gpus[device++].vwidth = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].vwidth = gpus[0].vwidth; } return NULL; } char *set_worksize(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set work size"; val = atoi(nextptr); if (val < 1 || val > 9999) return "Invalid value passed to set_worksize"; gpus[device++].work_size = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < 1 || val > 9999) return "Invalid value passed to set_worksize"; gpus[device++].work_size = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].work_size = gpus[0].work_size; } return NULL; } #ifdef USE_SCRYPT char *set_shaders(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set lookup gap"; val = atoi(nextptr); gpus[device++].shaders = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); gpus[device++].shaders = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].shaders = gpus[0].shaders; } return NULL; } char *set_lookup_gap(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set lookup gap"; val = atoi(nextptr); gpus[device++].opt_lg = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); gpus[device++].opt_lg = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].opt_lg = gpus[0].opt_lg; } return NULL; } char *set_thread_concurrency(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set thread concurrency"; val = atoi(nextptr); gpus[device++].opt_tc = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); gpus[device++].opt_tc = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].opt_tc = gpus[0].opt_tc; } return NULL; } #endif static enum cl_kernels select_kernel(char *arg) { if (!strcmp(arg, "diablo")) return KL_DIABLO; if (!strcmp(arg, "diakgcn")) return KL_DIAKGCN; if (!strcmp(arg, "poclbm")) return KL_POCLBM; if (!strcmp(arg, "phatk")) return KL_PHATK; #ifdef USE_SCRYPT if (!strcmp(arg, "scrypt")) return KL_SCRYPT; #endif return KL_NONE; } char *set_kernel(char *arg) { enum cl_kernels 
kern; int i, device = 0; char *nextptr; if (opt_scrypt) return "Cannot specify a kernel with scrypt"; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set kernel"; kern = select_kernel(nextptr); if (kern == KL_NONE) return "Invalid parameter to set_kernel"; gpus[device++].kernel = kern; while ((nextptr = strtok(NULL, ",")) != NULL) { kern = select_kernel(nextptr); if (kern == KL_NONE) return "Invalid parameter to set_kernel"; gpus[device++].kernel = kern; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].kernel = gpus[0].kernel; } return NULL; } #endif #ifdef HAVE_ADL /* This function allows us to map an adl device to an opencl device for when * simple enumeration has failed to match them. */ char *set_gpu_map(char *arg) { int val1 = 0, val2 = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu map"; if (sscanf(arg, "%d:%d", &val1, &val2) != 2) return "Invalid description for map pair"; if (val1 < 0 || val1 > MAX_GPUDEVICES || val2 < 0 || val2 > MAX_GPUDEVICES) return "Invalid value passed to set_gpu_map"; gpus[val1].virtual_adl = val2; gpus[val1].mapped = true; while ((nextptr = strtok(NULL, ",")) != NULL) { if (sscanf(nextptr, "%d:%d", &val1, &val2) != 2) return "Invalid description for map pair"; if (val1 < 0 || val1 > MAX_GPUDEVICES || val2 < 0 || val2 > MAX_GPUDEVICES) return "Invalid value passed to set_gpu_map"; gpus[val1].virtual_adl = val2; gpus[val1].mapped = true; } return NULL; } char *set_gpu_engine(char *arg) { int i, val1 = 0, val2 = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu engine"; get_intrange(nextptr, &val1, &val2); if (val1 < 0 || val1 > 9999 || val2 < 0 || val2 > 9999) return "Invalid value passed to set_gpu_engine"; gpus[device].min_engine = val1; gpus[device].gpu_engine = val2; device++; while ((nextptr = strtok(NULL, ",")) != NULL) { get_intrange(nextptr, &val1, &val2); if (val1 < 0 || val1 > 9999 || val2 < 0 || val2 > 9999) return "Invalid value passed to set_gpu_engine"; gpus[device].min_engine = val1; gpus[device].gpu_engine = val2; device++; } if (device == 1) { for (i = 1; i < MAX_GPUDEVICES; i++) { gpus[i].min_engine = gpus[0].min_engine; gpus[i].gpu_engine = gpus[0].gpu_engine; } } return NULL; } char *set_gpu_fan(char *arg) { int i, val1 = 0, val2 = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu fan"; get_intrange(nextptr, &val1, &val2); if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100) return "Invalid value passed to set_gpu_fan"; gpus[device].min_fan = val1; gpus[device].gpu_fan = val2; device++; while ((nextptr = strtok(NULL, ",")) != NULL) { get_intrange(nextptr, &val1, &val2); if (val1 < 0 || val1 > 100 || val2 < 0 || val2 > 100) return "Invalid value passed to set_gpu_fan"; gpus[device].min_fan = val1; gpus[device].gpu_fan = val2; device++; } if (device == 1) { for (i = 1; i < MAX_GPUDEVICES; i++) { gpus[i].min_fan = gpus[0].min_fan; gpus[i].gpu_fan = gpus[0].gpu_fan; } } return NULL; } char *set_gpu_memclock(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu memclock"; val = atoi(nextptr); if (val < 0 || val >= 9999) return "Invalid value passed to set_gpu_memclock"; gpus[device++].gpu_memclock = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < 0 || val >= 9999) return 
"Invalid value passed to set_gpu_memclock"; gpus[device++].gpu_memclock = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].gpu_memclock = gpus[0].gpu_memclock; } return NULL; } char *set_gpu_memdiff(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu memdiff"; val = atoi(nextptr); if (val < -9999 || val > 9999) return "Invalid value passed to set_gpu_memdiff"; gpus[device++].gpu_memdiff = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < -9999 || val > 9999) return "Invalid value passed to set_gpu_memdiff"; gpus[device++].gpu_memdiff = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].gpu_memdiff = gpus[0].gpu_memdiff; } return NULL; } char *set_gpu_powertune(char *arg) { int i, val = 0, device = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu powertune"; val = atoi(nextptr); if (val < -99 || val > 99) return "Invalid value passed to set_gpu_powertune"; gpus[device++].gpu_powertune = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < -99 || val > 99) return "Invalid value passed to set_gpu_powertune"; gpus[device++].gpu_powertune = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].gpu_powertune = gpus[0].gpu_powertune; } return NULL; } char *set_gpu_vddc(char *arg) { int i, device = 0; float val = 0; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set gpu vddc"; val = atof(nextptr); if (val < 0 || val >= 9999) return "Invalid value passed to set_gpu_vddc"; gpus[device++].gpu_vddc = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atof(nextptr); if (val < 0 || val >= 9999) return "Invalid value passed to set_gpu_vddc"; gpus[device++].gpu_vddc = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) gpus[i].gpu_vddc = gpus[0].gpu_vddc; } return NULL; } char *set_temp_overheat(char *arg) { int i, val = 0, device = 0, *to; char *nextptr; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set temp overheat"; val = atoi(nextptr); if (val < 0 || val > 200) return "Invalid value passed to set temp overheat"; to = &gpus[device++].adl.overtemp; *to = val; while ((nextptr = strtok(NULL, ",")) != NULL) { val = atoi(nextptr); if (val < 0 || val > 200) return "Invalid value passed to set temp overheat"; to = &gpus[device++].adl.overtemp; *to = val; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) { to = &gpus[i].adl.overtemp; *to = val; } } return NULL; } #endif #ifdef HAVE_OPENCL char *set_intensity(char *arg) { int i, device = 0, *tt; char *nextptr, val = 0; nextptr = strtok(arg, ","); if (nextptr == NULL) return "Invalid parameters for set intensity"; if (!strncasecmp(nextptr, "d", 1)) gpus[device].dynamic = true; else { gpus[device].dynamic = false; val = atoi(nextptr); if (val < MIN_INTENSITY || val > MAX_GPU_INTENSITY) return "Invalid value passed to set intensity"; tt = &gpus[device].intensity; *tt = val; } device++; while ((nextptr = strtok(NULL, ",")) != NULL) { if (!strncasecmp(nextptr, "d", 1)) gpus[device].dynamic = true; else { gpus[device].dynamic = false; val = atoi(nextptr); if (val < MIN_INTENSITY || val > MAX_GPU_INTENSITY) return "Invalid value passed to set intensity"; tt = &gpus[device].intensity; *tt = val; } device++; } if (device == 1) { for (i = device; i < MAX_GPUDEVICES; i++) 
{ gpus[i].dynamic = gpus[0].dynamic; gpus[i].intensity = gpus[0].intensity; } } return NULL; } #endif #ifdef HAVE_OPENCL BFG_REGISTER_DRIVER(opencl_api) char *print_ndevs_and_exit(int *ndevs) { opt_log_output = true; opencl_api.drv_detect(); clear_adl(*ndevs); applog(LOG_INFO, "%i GPU devices max detected", *ndevs); exit(*ndevs); } #endif struct cgpu_info gpus[MAX_GPUDEVICES]; /* Maximum number apparently possible */ struct cgpu_info *cpus; #ifdef HAVE_OPENCL /* In dynamic mode, only the first thread of each device will be in use. * This potentially could start a thread that was stopped with the start-stop * options if one were to disable dynamic from the menu on a paused GPU */ void pause_dynamic_threads(int gpu) { struct cgpu_info *cgpu = &gpus[gpu]; int i; for (i = 1; i < cgpu->threads; i++) { struct thr_info *thr; thr = cgpu->thr[i]; if (!thr->pause && cgpu->dynamic) { applog(LOG_WARNING, "Disabling extra threads due to dynamic mode."); applog(LOG_WARNING, "Tune dynamic intensity with --gpu-dyninterval"); } thr->pause = cgpu->dynamic; if (!cgpu->dynamic && cgpu->deven != DEV_DISABLED) mt_enable(thr); } } struct device_drv opencl_api; #endif /* HAVE_OPENCL */ #if defined(HAVE_OPENCL) && defined(HAVE_CURSES) static void opencl_wlogprint_status(struct cgpu_info *cgpu) { struct thr_info *thr; int i; char checkin[40]; double displayed_rolling; bool mhash_base = !(cgpu->rolling < 1); char logline[255]; strcpy(logline, ""); // In case it has no data tailsprintf(logline, sizeof(logline), "I:%s%d ", (cgpu->dynamic ? "d" : ""), cgpu->intensity); #ifdef HAVE_ADL if (cgpu->has_adl) { int engineclock = 0, memclock = 0, activity = 0, fanspeed = 0, fanpercent = 0, powertune = 0; float temp = 0, vddc = 0; if (gpu_stats(cgpu->device_id, &temp, &engineclock, &memclock, &vddc, &activity, &fanspeed, &fanpercent, &powertune)) { if (fanspeed != -1 || fanpercent != -1) { tailsprintf(logline, sizeof(logline), "F: "); if (fanspeed > 9999) fanspeed = 9999; if (fanpercent != -1) { tailsprintf(logline, sizeof(logline), "%d%% ", fanpercent); if (fanspeed != -1) tailsprintf(logline, sizeof(logline), "(%d RPM) ", fanspeed); } else tailsprintf(logline, sizeof(logline), "%d RPM ", fanspeed); tailsprintf(logline, sizeof(logline), " "); } if (engineclock != -1) tailsprintf(logline, sizeof(logline), "E: %d MHz ", engineclock); if (memclock != -1) tailsprintf(logline, sizeof(logline), "M: %d MHz ", memclock); if (vddc != -1) tailsprintf(logline, sizeof(logline), "V: %.3fV ", vddc); if (activity != -1) tailsprintf(logline, sizeof(logline), "A: %d%% ", activity); if (powertune != -1) tailsprintf(logline, sizeof(logline), "P: %d%%", powertune); } } #endif wlogprint("%s\n", logline); wlogprint("Last initialised: %s\n", cgpu->init); for (i = 0; i < mining_threads; i++) { thr = get_thread(i); if (thr->cgpu != cgpu) continue; get_datestamp(checkin, sizeof(checkin), time(NULL) - timer_elapsed(&thr->last, NULL)); displayed_rolling = thr->rolling; if (!mhash_base) displayed_rolling *= 1000; snprintf(logline, sizeof(logline), "Thread %d: %.1f %sh/s %s ", i, displayed_rolling, mhash_base ? "M" : "K" , cgpu->deven != DEV_DISABLED ? 
"Enabled" : "Disabled"); switch (cgpu->status) { default: case LIFE_WELL: tailsprintf(logline, sizeof(logline), "ALIVE"); break; case LIFE_SICK: tailsprintf(logline, sizeof(logline), "SICK reported in %s", checkin); break; case LIFE_DEAD: tailsprintf(logline, sizeof(logline), "DEAD reported in %s", checkin); break; case LIFE_INIT: case LIFE_NOSTART: tailsprintf(logline, sizeof(logline), "Never started"); break; } if (thr->pause) tailsprintf(logline, sizeof(logline), " paused"); wlogprint("%s\n", logline); } } static void opencl_tui_wlogprint_choices(struct cgpu_info *cgpu) { wlogprint("[I]ntensity [R]estart GPU "); #ifdef HAVE_ADL if (cgpu->has_adl) wlogprint("[C]hange settings "); #endif } static const char *opencl_tui_handle_choice(struct cgpu_info *cgpu, int input) { switch (input) { case 'i': case 'I': { int intensity; char *intvar; if (opt_scrypt) { intvar = curses_input("Set GPU scan intensity (d or " MIN_SCRYPT_INTENSITY_STR " -> " MAX_SCRYPT_INTENSITY_STR ")"); } else { intvar = curses_input("Set GPU scan intensity (d or " MIN_SHA_INTENSITY_STR " -> " MAX_SHA_INTENSITY_STR ")"); } if (!intvar) return "Invalid intensity\n"; if (!strncasecmp(intvar, "d", 1)) { cgpu->dynamic = true; pause_dynamic_threads(cgpu->device_id); free(intvar); return "Dynamic mode enabled\n"; } intensity = atoi(intvar); free(intvar); if (intensity < MIN_INTENSITY || intensity > MAX_INTENSITY) return "Invalid intensity (out of range)\n"; cgpu->dynamic = false; cgpu->intensity = intensity; pause_dynamic_threads(cgpu->device_id); return "Intensity changed\n"; } case 'r': case 'R': reinit_device(cgpu); return "Attempting to restart\n"; case 'c': case 'C': { char logline[256]; clear_logwin(); get_statline3(logline, sizeof(logline), cgpu, true, true); wattron(logwin, A_BOLD); wlogprint("%s", logline); wattroff(logwin, A_BOLD); wlogprint("\n"); change_gpusettings(cgpu->device_id); return ""; // Force refresh } } return NULL; } #endif #ifdef HAVE_OPENCL static _clState *clStates[MAX_GPUDEVICES]; #define CL_SET_BLKARG(blkvar) status |= clSetKernelArg(*kernel, num++, sizeof(uint), (void *)&blk->blkvar) #define CL_SET_ARG(var) status |= clSetKernelArg(*kernel, num++, sizeof(var), (void *)&var) #define CL_SET_VARG(args, var) status |= clSetKernelArg(*kernel, num++, args * sizeof(uint), (void *)var) static cl_int queue_poclbm_kernel(_clState *clState, dev_blk_ctx *blk, cl_uint threads) { cl_kernel *kernel = &clState->kernel; unsigned int num = 0; cl_int status = 0; CL_SET_BLKARG(ctx_a); CL_SET_BLKARG(ctx_b); CL_SET_BLKARG(ctx_c); CL_SET_BLKARG(ctx_d); CL_SET_BLKARG(ctx_e); CL_SET_BLKARG(ctx_f); CL_SET_BLKARG(ctx_g); CL_SET_BLKARG(ctx_h); CL_SET_BLKARG(cty_b); CL_SET_BLKARG(cty_c); CL_SET_BLKARG(cty_f); CL_SET_BLKARG(cty_g); CL_SET_BLKARG(cty_h); if (!clState->goffset) { cl_uint vwidth = clState->vwidth; uint *nonces = alloca(sizeof(uint) * vwidth); unsigned int i; for (i = 0; i < vwidth; i++) nonces[i] = blk->nonce + (i * threads); CL_SET_VARG(vwidth, nonces); } CL_SET_BLKARG(fW0); CL_SET_BLKARG(fW1); CL_SET_BLKARG(fW2); CL_SET_BLKARG(fW3); CL_SET_BLKARG(fW15); CL_SET_BLKARG(fW01r); CL_SET_BLKARG(D1A); CL_SET_BLKARG(C1addK5); CL_SET_BLKARG(B1addK6); CL_SET_BLKARG(W16addK16); CL_SET_BLKARG(W17addK17); CL_SET_BLKARG(PreVal4addT1); CL_SET_BLKARG(PreVal0); CL_SET_ARG(clState->outputBuffer); return status; } static cl_int queue_phatk_kernel(_clState *clState, dev_blk_ctx *blk, __maybe_unused cl_uint threads) { cl_kernel *kernel = &clState->kernel; cl_uint vwidth = clState->vwidth; unsigned int i, num = 0; cl_int status = 0; 
uint *nonces; CL_SET_BLKARG(ctx_a); CL_SET_BLKARG(ctx_b); CL_SET_BLKARG(ctx_c); CL_SET_BLKARG(ctx_d); CL_SET_BLKARG(ctx_e); CL_SET_BLKARG(ctx_f); CL_SET_BLKARG(ctx_g); CL_SET_BLKARG(ctx_h); CL_SET_BLKARG(cty_b); CL_SET_BLKARG(cty_c); CL_SET_BLKARG(cty_d); CL_SET_BLKARG(cty_f); CL_SET_BLKARG(cty_g); CL_SET_BLKARG(cty_h); nonces = alloca(sizeof(uint) * vwidth); for (i = 0; i < vwidth; i++) nonces[i] = blk->nonce + i; CL_SET_VARG(vwidth, nonces); CL_SET_BLKARG(W16); CL_SET_BLKARG(W17); CL_SET_BLKARG(PreVal4_2); CL_SET_BLKARG(PreVal0); CL_SET_BLKARG(PreW18); CL_SET_BLKARG(PreW19); CL_SET_BLKARG(PreW31); CL_SET_BLKARG(PreW32); CL_SET_ARG(clState->outputBuffer); return status; } static cl_int queue_diakgcn_kernel(_clState *clState, dev_blk_ctx *blk, __maybe_unused cl_uint threads) { cl_kernel *kernel = &clState->kernel; unsigned int num = 0; cl_int status = 0; if (!clState->goffset) { cl_uint vwidth = clState->vwidth; uint *nonces = alloca(sizeof(uint) * vwidth); unsigned int i; for (i = 0; i < vwidth; i++) nonces[i] = blk->nonce + i; CL_SET_VARG(vwidth, nonces); } CL_SET_BLKARG(PreVal0); CL_SET_BLKARG(PreVal4_2); CL_SET_BLKARG(cty_h); CL_SET_BLKARG(D1A); CL_SET_BLKARG(cty_b); CL_SET_BLKARG(cty_c); CL_SET_BLKARG(cty_f); CL_SET_BLKARG(cty_g); CL_SET_BLKARG(C1addK5); CL_SET_BLKARG(B1addK6); CL_SET_BLKARG(PreVal0addK7); CL_SET_BLKARG(W16addK16); CL_SET_BLKARG(W17addK17); CL_SET_BLKARG(PreW18); CL_SET_BLKARG(PreW19); CL_SET_BLKARG(W16); CL_SET_BLKARG(W17); CL_SET_BLKARG(PreW31); CL_SET_BLKARG(PreW32); CL_SET_BLKARG(ctx_a); CL_SET_BLKARG(ctx_b); CL_SET_BLKARG(ctx_c); CL_SET_BLKARG(ctx_d); CL_SET_BLKARG(ctx_e); CL_SET_BLKARG(ctx_f); CL_SET_BLKARG(ctx_g); CL_SET_BLKARG(ctx_h); CL_SET_BLKARG(zeroA); CL_SET_BLKARG(zeroB); CL_SET_BLKARG(oneA); CL_SET_BLKARG(twoA); CL_SET_BLKARG(threeA); CL_SET_BLKARG(fourA); CL_SET_BLKARG(fiveA); CL_SET_BLKARG(sixA); CL_SET_BLKARG(sevenA); CL_SET_ARG(clState->outputBuffer); return status; } static cl_int queue_diablo_kernel(_clState *clState, dev_blk_ctx *blk, cl_uint threads) { cl_kernel *kernel = &clState->kernel; unsigned int num = 0; cl_int status = 0; if (!clState->goffset) { cl_uint vwidth = clState->vwidth; uint *nonces = alloca(sizeof(uint) * vwidth); unsigned int i; for (i = 0; i < vwidth; i++) nonces[i] = blk->nonce + (i * threads); CL_SET_VARG(vwidth, nonces); } CL_SET_BLKARG(PreVal0); CL_SET_BLKARG(PreVal0addK7); CL_SET_BLKARG(PreVal4addT1); CL_SET_BLKARG(PreW18); CL_SET_BLKARG(PreW19); CL_SET_BLKARG(W16); CL_SET_BLKARG(W17); CL_SET_BLKARG(W16addK16); CL_SET_BLKARG(W17addK17); CL_SET_BLKARG(PreW31); CL_SET_BLKARG(PreW32); CL_SET_BLKARG(D1A); CL_SET_BLKARG(cty_b); CL_SET_BLKARG(cty_c); CL_SET_BLKARG(cty_h); CL_SET_BLKARG(cty_f); CL_SET_BLKARG(cty_g); CL_SET_BLKARG(C1addK5); CL_SET_BLKARG(B1addK6); CL_SET_BLKARG(ctx_a); CL_SET_BLKARG(ctx_b); CL_SET_BLKARG(ctx_c); CL_SET_BLKARG(ctx_d); CL_SET_BLKARG(ctx_e); CL_SET_BLKARG(ctx_f); CL_SET_BLKARG(ctx_g); CL_SET_BLKARG(ctx_h); CL_SET_ARG(clState->outputBuffer); return status; } #ifdef USE_SCRYPT static cl_int queue_scrypt_kernel(_clState *clState, dev_blk_ctx *blk, __maybe_unused cl_uint threads) { unsigned char *midstate = blk->work->midstate; cl_kernel *kernel = &clState->kernel; unsigned int num = 0; cl_uint le_target; cl_int status = 0; le_target = *(cl_uint *)(blk->work->target + 28); clState->cldata = blk->work->data; status = clEnqueueWriteBuffer(clState->commandQueue, clState->CLbuffer0, true, 0, 80, clState->cldata, 0, NULL,NULL); CL_SET_ARG(clState->CLbuffer0); CL_SET_ARG(clState->outputBuffer); 
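/* Remaining scrypt kernel arguments, in order: the scratchpad buffer
 * (padbuffer8), the two halves of the work midstate passed as 4-uint vector
 * arguments, and the 32-bit little-endian target word taken from offset 28 of
 * the full target above.  The kernel is therefore invoked with roughly
 * (CLbuffer0, outputBuffer, padbuffer8, midstate[0..3], midstate[4..7],
 * le_target); the parameter names in the .cl source may differ. */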
CL_SET_ARG(clState->padbuffer8); CL_SET_VARG(4, &midstate[0]); CL_SET_VARG(4, &midstate[16]); CL_SET_ARG(le_target); return status; } #endif static void set_threads_hashes(unsigned int vectors,int64_t *hashes, size_t *globalThreads, unsigned int minthreads, __maybe_unused int *intensity) { unsigned int threads = 0; while (threads < minthreads) { threads = 1 << ((opt_scrypt ? 0 : 15) + *intensity); if (threads < minthreads) { if (likely(*intensity < MAX_INTENSITY)) (*intensity)++; else threads = minthreads; } } *globalThreads = threads; *hashes = threads * vectors; } #endif /* HAVE_OPENCL */ #ifdef HAVE_OPENCL /* We have only one thread that ever re-initialises GPUs, thus if any GPU * init command fails due to a completely wedged GPU, the thread will never * return, unable to harm other GPUs. If it does return, it means we only had * a soft failure and then the reinit_gpu thread is ready to tackle another * GPU */ void *reinit_gpu(void *userdata) { struct thr_info *mythr = userdata; struct cgpu_info *cgpu, *sel_cgpu; struct thr_info *thr; char name[256]; int thr_id; int i; pthread_detach(pthread_self()); RenameThread("reinit_gpu"); select_cgpu: sel_cgpu = cgpu = tq_pop(mythr->q, NULL); if (!cgpu) goto out; if (clDevicesNum() != nDevs) { applog(LOG_WARNING, "Hardware not reporting same number of active devices, will not attempt to restart GPU"); goto out; } for (i = 0; i < cgpu->threads; ++i) { thr = cgpu->thr[i]; thr_id = thr->id; thr->rolling = thr->cgpu->rolling = 0; /* Reports the last time we tried to revive a sick GPU */ cgtime(&thr->sick); if (!pthread_cancel(thr->pth)) { applog(LOG_WARNING, "Thread %d still exists, killing it off", thr_id); } else applog(LOG_WARNING, "Thread %d no longer exists", thr_id); } for (i = 0; i < cgpu->threads; ++i) { int virtual_gpu; thr = cgpu->thr[i]; thr_id = thr->id; virtual_gpu = cgpu->virtual_gpu; /* Lose this ram cause we may get stuck here! */ //tq_freeze(thr->q); thr->q = tq_new(); if (!thr->q) quithere(1, "Failed to tq_new"); /* Lose this ram cause we may dereference in the dying thread! */ //free(clState); applog(LOG_INFO, "Reinit GPU thread %d", thr_id); clStates[thr_id] = initCl(virtual_gpu, name, sizeof(name)); if (!clStates[thr_id]) { applog(LOG_ERR, "Failed to reinit GPU thread %d", thr_id); goto select_cgpu; } applog(LOG_INFO, "initCl() finished. 
Found %s", name); if (unlikely(thr_info_create(thr, NULL, miner_thread, thr))) { applog(LOG_ERR, "thread %d create failed", thr_id); return NULL; } applog(LOG_WARNING, "Thread %d restarted", thr_id); } get_now_datestamp(sel_cgpu->init, sizeof(sel_cgpu->init)); proc_enable(cgpu); goto select_cgpu; out: return NULL; } #else void *reinit_gpu(__maybe_unused void *userdata) { return NULL; } #endif #ifdef HAVE_OPENCL struct device_drv opencl_api; static int opencl_autodetect() { RUNONCE(0); #ifndef WIN32 if (!getenv("DISPLAY")) { applog(LOG_DEBUG, "DISPLAY not set, setting :0 just in case"); setenv("DISPLAY", ":0", 1); } #endif if (!load_opencl_symbols()) { nDevs = 0; return 0; } int i; nDevs = clDevicesNum(); if (nDevs < 0) { applog(LOG_ERR, "clDevicesNum returned error, no GPUs usable"); nDevs = 0; } if (!nDevs) return 0; /* If opt_g_threads is not set, use default 1 thread on scrypt and * 2 for regular mining */ if (opt_g_threads == -1) { if (opt_scrypt) opt_g_threads = 1; else opt_g_threads = 2; } #ifdef HAVE_SENSORS struct opencl_device_data *data; const sensors_chip_name *cn; int c = 0; sensors_init(NULL); sensors_chip_name cnm; if (sensors_parse_chip_name("radeon-*", &cnm)) c = -1; #endif for (i = 0; i < nDevs; ++i) { struct cgpu_info *cgpu; cgpu = &gpus[i]; cgpu->devtype = "GPU"; cgpu->deven = DEV_ENABLED; cgpu->drv = &opencl_api; cgpu->device_id = i; cgpu->threads = opt_g_threads; cgpu->virtual_gpu = i; #ifdef HAVE_SENSORS cn = (c == -1) ? NULL : sensors_get_detected_chips(&cnm, &c); cgpu->device_data = data = malloc(sizeof(*data)); *data = (struct opencl_device_data){ .sensor = cn, }; #endif add_cgpu(cgpu); } if (!opt_noadl) init_adl(nDevs); return nDevs; } static void opencl_detect() { noserial_detect_manual(&opencl_api, opencl_autodetect); } static void reinit_opencl_device(struct cgpu_info *gpu) { tq_push(control_thr[gpur_thr_id].q, gpu); } // FIXME: Legacy (called by TUI) for side effects static bool override_opencl_statline_temp(char *buf, size_t bufsz, struct cgpu_info *gpu, __maybe_unused bool per_processor) { #ifdef HAVE_SENSORS struct opencl_device_data *data = gpu->device_data; if (data->sensor) { const sensors_chip_name *cn = data->sensor; const sensors_feature *feat; for (int f = 0; (feat = sensors_get_features(cn, &f)); ) { const sensors_subfeature *subf; subf = sensors_get_subfeature(cn, feat, SENSORS_SUBFEATURE_TEMP_INPUT); if (!(subf && subf->flags & SENSORS_MODE_R)) continue; double val; int rc = sensors_get_value(cn, subf->number, &val); if (rc) continue; gpu->temp = val; return false; } } #endif #ifdef HAVE_ADL if (gpu->has_adl) { int gpuid = gpu->device_id; gpu_temp(gpuid); gpu_fanspeed(gpuid); } #endif return false; } static struct api_data* get_opencl_api_extra_device_status(struct cgpu_info *gpu) { struct api_data*root = NULL; float gt, gv; int ga, gf, gp, gc, gm, pt; #ifdef HAVE_ADL if (!gpu_stats(gpu->device_id, >, &gc, &gm, &gv, &ga, &gf, &gp, &pt)) #endif gt = gv = gm = gc = ga = gf = gp = pt = 0; root = api_add_int(root, "Fan Speed", &gf, true); root = api_add_int(root, "Fan Percent", &gp, true); root = api_add_int(root, "GPU Clock", &gc, true); root = api_add_int(root, "Memory Clock", &gm, true); root = api_add_volts(root, "GPU Voltage", &gv, true); root = api_add_int(root, "GPU Activity", &ga, true); root = api_add_int(root, "Powertune", &pt, true); char intensity[20]; if (gpu->dynamic) strcpy(intensity, "D"); else sprintf(intensity, "%d", gpu->intensity); root = api_add_string(root, "Intensity", intensity, true); return root; } struct opencl_thread_data { 
cl_int (*queue_kernel_parameters)(_clState *, dev_blk_ctx *, cl_uint); uint32_t *res; }; static uint32_t *blank_res; static bool opencl_thread_prepare(struct thr_info *thr) { char name[256]; struct cgpu_info *cgpu = thr->cgpu; int gpu = cgpu->device_id; int virtual_gpu = cgpu->virtual_gpu; int i = thr->id; static bool failmessage = false; int buffersize = opt_scrypt ? SCRYPT_BUFFERSIZE : BUFFERSIZE; if (!blank_res) blank_res = calloc(buffersize, 1); if (!blank_res) { applog(LOG_ERR, "Failed to calloc in opencl_thread_init"); return false; } strcpy(name, ""); applog(LOG_INFO, "Init GPU thread %i GPU %i virtual GPU %i", i, gpu, virtual_gpu); clStates[i] = initCl(virtual_gpu, name, sizeof(name)); if (!clStates[i]) { #ifdef HAVE_CURSES if (use_curses) enable_curses(); #endif applog(LOG_ERR, "Failed to init GPU thread %d, disabling device %d", i, gpu); if (!failmessage) { applog(LOG_ERR, "Restarting the GPU from the menu will not fix this."); applog(LOG_ERR, "Try restarting BFGMiner."); failmessage = true; #ifdef HAVE_CURSES char *buf; if (use_curses) { buf = curses_input("Press enter to continue"); if (buf) free(buf); } #endif } cgpu->deven = DEV_DISABLED; cgpu->status = LIFE_NOSTART; dev_error(cgpu, REASON_DEV_NOSTART); return false; } if (!cgpu->name) cgpu->name = strdup(name); if (!cgpu->kname) { switch (clStates[i]->chosen_kernel) { case KL_DIABLO: cgpu->kname = "diablo"; break; case KL_DIAKGCN: cgpu->kname = "diakgcn"; break; case KL_PHATK: cgpu->kname = "phatk"; break; #ifdef USE_SCRYPT case KL_SCRYPT: cgpu->kname = "scrypt"; break; #endif case KL_POCLBM: cgpu->kname = "poclbm"; break; default: break; } } applog(LOG_INFO, "initCl() finished. Found %s", name); get_now_datestamp(cgpu->init, sizeof(cgpu->init)); have_opencl = true; return true; } static bool opencl_thread_init(struct thr_info *thr) { const int thr_id = thr->id; struct cgpu_info *gpu = thr->cgpu; struct opencl_thread_data *thrdata; _clState *clState = clStates[thr_id]; cl_int status = 0; thrdata = calloc(1, sizeof(*thrdata)); thr->cgpu_data = thrdata; int buffersize = opt_scrypt ? 
SCRYPT_BUFFERSIZE : BUFFERSIZE; if (!thrdata) { applog(LOG_ERR, "Failed to calloc in opencl_thread_init"); return false; } switch (clState->chosen_kernel) { case KL_POCLBM: thrdata->queue_kernel_parameters = &queue_poclbm_kernel; break; case KL_PHATK: thrdata->queue_kernel_parameters = &queue_phatk_kernel; break; case KL_DIAKGCN: thrdata->queue_kernel_parameters = &queue_diakgcn_kernel; break; #ifdef USE_SCRYPT case KL_SCRYPT: thrdata->queue_kernel_parameters = &queue_scrypt_kernel; break; #endif default: case KL_DIABLO: thrdata->queue_kernel_parameters = &queue_diablo_kernel; break; } thrdata->res = calloc(buffersize, 1); if (!thrdata->res) { free(thrdata); applog(LOG_ERR, "Failed to calloc in opencl_thread_init"); return false; } status |= clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_TRUE, 0, buffersize, blank_res, 0, NULL, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); return false; } gpu->status = LIFE_WELL; gpu->device_last_well = time(NULL); return true; } static bool opencl_prepare_work(struct thr_info __maybe_unused *thr, struct work *work) { #ifdef USE_SCRYPT if (opt_scrypt) work->blk.work = work; else #endif precalc_hash(&work->blk, (uint32_t *)(work->midstate), (uint32_t *)(work->data + 64)); return true; } extern int opt_dynamic_interval; static int64_t opencl_scanhash(struct thr_info *thr, struct work *work, int64_t __maybe_unused max_nonce) { const int thr_id = thr->id; struct opencl_thread_data *thrdata = thr->cgpu_data; struct cgpu_info *gpu = thr->cgpu; _clState *clState = clStates[thr_id]; const cl_kernel *kernel = &clState->kernel; const int dynamic_us = opt_dynamic_interval * 1000; cl_int status; size_t globalThreads[1]; size_t localThreads[1] = { clState->wsize }; int64_t hashes; int found = opt_scrypt ? SCRYPT_FOUND : FOUND; int buffersize = opt_scrypt ? SCRYPT_BUFFERSIZE : BUFFERSIZE; /* Windows' timer resolution is only 15ms so oversample 5x */ if (gpu->dynamic && (++gpu->intervals * dynamic_us) > 70000) { struct timeval tv_gpuend; double gpu_us; cgtime(&tv_gpuend); gpu_us = us_tdiff(&tv_gpuend, &gpu->tv_gpustart) / gpu->intervals; if (gpu_us > dynamic_us) { if (gpu->intensity > MIN_INTENSITY) --gpu->intensity; } else if (gpu_us < dynamic_us / 2) { if (gpu->intensity < MAX_INTENSITY) ++gpu->intensity; } memcpy(&(gpu->tv_gpustart), &tv_gpuend, sizeof(struct timeval)); gpu->intervals = 0; } set_threads_hashes(clState->vwidth, &hashes, globalThreads, localThreads[0], &gpu->intensity); if (hashes > gpu->max_hashes) gpu->max_hashes = hashes; status = thrdata->queue_kernel_parameters(clState, &work->blk, globalThreads[0]); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error: clSetKernelArg of all params failed."); return -1; } if (clState->goffset) { size_t global_work_offset[1]; global_work_offset[0] = work->blk.nonce; status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, global_work_offset, globalThreads, localThreads, 0, NULL, NULL); } else status = clEnqueueNDRangeKernel(clState->commandQueue, *kernel, 1, NULL, globalThreads, localThreads, 0, NULL, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error %d: Enqueueing kernel onto command queue. (clEnqueueNDRangeKernel)", status); return -1; } status = clEnqueueReadBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0, buffersize, thrdata->res, 0, NULL, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error: clEnqueueReadBuffer failed error %d. 
(clEnqueueReadBuffer)", status); return -1; } /* The amount of work scanned can fluctuate when intensity changes * and since we do this one cycle behind, we increment the work more * than enough to prevent repeating work */ work->blk.nonce += gpu->max_hashes; /* This finish flushes the readbuffer set with CL_FALSE in clEnqueueReadBuffer */ clFinish(clState->commandQueue); /* FOUND entry is used as a counter to say how many nonces exist */ if (thrdata->res[found]) { /* Clear the buffer again */ status = clEnqueueWriteBuffer(clState->commandQueue, clState->outputBuffer, CL_FALSE, 0, buffersize, blank_res, 0, NULL, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error: clEnqueueWriteBuffer failed."); return -1; } applog(LOG_DEBUG, "GPU %d found something?", gpu->device_id); postcalc_hash_async(thr, work, thrdata->res); memset(thrdata->res, 0, buffersize); /* This finish flushes the writebuffer set with CL_FALSE in clEnqueueWriteBuffer */ clFinish(clState->commandQueue); } return hashes; } static void opencl_thread_shutdown(struct thr_info *thr) { const int thr_id = thr->id; _clState *clState = clStates[thr_id]; clReleaseKernel(clState->kernel); clReleaseProgram(clState->program); clReleaseCommandQueue(clState->commandQueue); clReleaseContext(clState->context); } struct device_drv opencl_api = { .dname = "opencl", .name = "OCL", .probe_priority = 110, .supported_algos = POW_SHA256D | POW_SCRYPT, .drv_detect = opencl_detect, .reinit_device = reinit_opencl_device, .override_statline_temp2 = override_opencl_statline_temp, #ifdef HAVE_CURSES .proc_wlogprint_status = opencl_wlogprint_status, .proc_tui_wlogprint_choices = opencl_tui_wlogprint_choices, .proc_tui_handle_choice = opencl_tui_handle_choice, #endif .get_api_extra_device_status = get_opencl_api_extra_device_status, .thread_prepare = opencl_thread_prepare, .thread_init = opencl_thread_init, .prepare_work = opencl_prepare_work, .scanhash = opencl_scanhash, .thread_shutdown = opencl_thread_shutdown, }; #endif bfgminer-bfgminer-3.10.0/driver-opencl.h000066400000000000000000000021201226556647300201100ustar00rootroot00000000000000#ifndef __DEVICE_GPU_H__ #define __DEVICE_GPU_H__ #include #include "miner.h" extern char *print_ndevs_and_exit(int *ndevs); extern void *reinit_gpu(void *userdata); extern char *set_gpu_map(char *arg); extern char *set_gpu_engine(char *arg); extern char *set_gpu_fan(char *arg); extern char *set_gpu_memclock(char *arg); extern char *set_gpu_memdiff(char *arg); extern char *set_gpu_powertune(char *arg); extern char *set_gpu_vddc(char *arg); extern char *set_temp_overheat(char *arg); extern char *set_temp_target(char *arg); extern char *set_intensity(char *arg); extern char *set_vector(char *arg); extern char *set_worksize(char *arg); #ifdef USE_SCRYPT extern char *set_shaders(char *arg); extern char *set_lookup_gap(char *arg); extern char *set_thread_concurrency(char *arg); #endif extern char *set_kernel(char *arg); void manage_gpu(void); extern void opencl_dynamic_cleanup(); extern void pause_dynamic_threads(int gpu); extern bool have_opencl; extern int opt_platform_id; extern bool opt_opencl_binaries; extern struct device_drv opencl_api; #endif /* __DEVICE_GPU_H__ */ bfgminer-bfgminer-3.10.0/driver-proxy.c000066400000000000000000000054401226556647300200140ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 
of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include "deviceapi.h" #include "driver-proxy.h" #include "miner.h" #include "util.h" BFG_REGISTER_DRIVER(proxy_drv) static struct proxy_client *proxy_clients; static pthread_mutex_t proxy_clients_mutex = PTHREAD_MUTEX_INITIALIZER; static void prune_worklog() { struct proxy_client *client, *tmp; struct work *work, *tmp2; struct timeval tv_now; timer_set_now(&tv_now); mutex_lock(&proxy_clients_mutex); HASH_ITER(hh, proxy_clients, client, tmp) { HASH_ITER(hh, client->work, work, tmp2) { if (timer_elapsed(&work->tv_work_start, &tv_now) <= opt_expiry) break; HASH_DEL(client->work, work); free_work(work); } } mutex_unlock(&proxy_clients_mutex); } static pthread_t prune_worklog_pth; static void *prune_worklog_thread(void *userdata) { struct cgpu_info *cgpu = userdata; pthread_detach(pthread_self()); RenameThread("PXY_pruner"); while (!cgpu->shutdown) { prune_worklog(); sleep(60); } return NULL; } static void proxy_first_client(struct cgpu_info *cgpu) { pthread_create(&prune_worklog_pth, NULL, prune_worklog_thread, cgpu); } struct proxy_client *proxy_find_or_create_client(const char *username) { struct proxy_client *client; struct cgpu_info *cgpu; char *user; int b; if (!username) return NULL; mutex_lock(&proxy_clients_mutex); HASH_FIND_STR(proxy_clients, username, client); if (!client) { user = strdup(username); cgpu = malloc(sizeof(*cgpu)); client = malloc(sizeof(*client)); *cgpu = (struct cgpu_info){ .drv = &proxy_drv, .threads = 0, .device_data = client, .device_path = user, }; timer_set_now(&cgpu->cgminer_stats.start_tv); if (unlikely(!create_new_cgpus(add_cgpu_live, cgpu))) { free(client); free(cgpu); free(user); return NULL; } *client = (struct proxy_client){ .username = user, .cgpu = cgpu, }; b = HASH_COUNT(proxy_clients); HASH_ADD_KEYPTR(hh, proxy_clients, client->username, strlen(user), client); mutex_unlock(&proxy_clients_mutex); if (!b) proxy_first_client(cgpu); } else mutex_unlock(&proxy_clients_mutex); return client; } #ifdef HAVE_CURSES static void proxy_wlogprint_status(struct cgpu_info *cgpu) { struct proxy_client *client = cgpu->device_data; wlogprint("Username: %s\n", client->username); } #endif struct device_drv proxy_drv = { .dname = "proxy", .name = "PXY", #ifdef HAVE_CURSES .proc_wlogprint_status = proxy_wlogprint_status, #endif }; bfgminer-bfgminer-3.10.0/driver-proxy.h000066400000000000000000000005011226556647300200120ustar00rootroot00000000000000#ifndef BFG_DRIVER_PROXY_H #define BFG_DRIVER_PROXY_H #include #include "miner.h" struct proxy_client { char *username; struct cgpu_info *cgpu; struct work *work; struct timeval tv_hashes_done; UT_hash_handle hh; }; extern struct proxy_client *proxy_find_or_create_client(const char *user); #endif bfgminer-bfgminer-3.10.0/driver-stratum.c000066400000000000000000000426741226556647300203440ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #ifndef WIN32 #include #else #include #endif #include #include #include #include #include #include #include #include #include "deviceapi.h" #include "driver-proxy.h" #include "miner.h" #include "util.h" #define MAX_CLIENTS 255 static bool _ssm_xnonce1s[MAX_CLIENTS + 1] = { true }; static uint8_t _ssm_client_octets; static uint8_t _ssm_client_xnonce2sz; static char *_ssm_notify; static int _ssm_notify_sz; static struct event *ev_notify; static notifier_t _ssm_update_notifier; struct stratumsrv_job { char *my_job_id; struct pool *pool; uint8_t work_restart_id; uint8_t n2size; struct timeval tv_prepared; struct stratum_work swork; char *nonce1; UT_hash_handle hh; }; static struct stratumsrv_job *_ssm_jobs; static struct work _ssm_cur_job_work; static uint64_t _ssm_jobid; static struct event_base *_smm_evbase; static bool _smm_running; static struct evconnlistener *_smm_listener; struct stratumsrv_conn { struct bufferevent *bev; uint32_t xnonce1_le; struct timeval tv_hashes_done; bool hashes_done_ext; struct stratumsrv_conn *next; }; static struct stratumsrv_conn *_ssm_connections; static void _ssm_gen_dummy_work(struct work *work, struct stratumsrv_job *ssj, const char * const extranonce2, uint32_t xnonce1) { uint8_t *p, *s; *work = (struct work){ .pool = ssj->pool, .work_restart_id = ssj->work_restart_id, .tv_staged = ssj->tv_prepared, }; bytes_resize(&work->nonce2, ssj->n2size); s = bytes_buf(&work->nonce2); p = &s[ssj->n2size - _ssm_client_xnonce2sz]; if (extranonce2) hex2bin(p, extranonce2, _ssm_client_xnonce2sz); #ifndef __OPTIMIZE__ else memset(p, '\0', _ssm_client_xnonce2sz); #endif p -= _ssm_client_octets; memcpy(p, &xnonce1, _ssm_client_octets); if (p != s) memset(s, '\xbb', p - s); gen_stratum_work2(work, &ssj->swork, ssj->nonce1); } static bool stratumsrv_update_notify_str(struct pool * const pool, bool clean) { cg_rlock(&pool->data_lock); struct stratumsrv_conn *conn; const struct stratum_work * const swork = &pool->swork; const int n2size = pool->n2size; char my_job_id[33]; int i; struct stratumsrv_job *ssj; ssize_t n2pad = n2size - _ssm_client_octets - _ssm_client_xnonce2sz; if (n2pad < 0) return false; size_t coinb1in_lenx = swork->nonce2_offset * 2; size_t n2padx = n2pad * 2; size_t coinb1_lenx = coinb1in_lenx + n2padx; size_t coinb2_len = bytes_len(&swork->coinbase) - swork->nonce2_offset - n2size; size_t coinb2_lenx = coinb2_len * 2; sprintf(my_job_id, "%"PRIx64"-%"PRIx64, (uint64_t)time(NULL), _ssm_jobid++); size_t bufsz = 166 + strlen(my_job_id) + coinb1_lenx + coinb2_lenx + (swork->merkles * 67); char * const buf = malloc(bufsz); char *p = buf; char prevhash[65], coinb1[coinb1_lenx + 1], coinb2[coinb2_lenx], version[9], nbits[9], ntime[9]; uint32_t ntime_n; bin2hex(prevhash, &swork->header1[4], 32); bin2hex(coinb1, bytes_buf(&swork->coinbase), swork->nonce2_offset); memset(&coinb1[coinb1in_lenx], 'B', n2padx); coinb1[coinb1_lenx] = '\0'; bin2hex(coinb2, &bytes_buf(&swork->coinbase)[swork->nonce2_offset + n2size], coinb2_len); p += sprintf(p, "{\"params\":[\"%s\",\"%s\",\"%s\",\"%s\",[", my_job_id, prevhash, coinb1, coinb2); for (i = 0; i < swork->merkles; ++i) { if (i) *p++ = ','; *p++ = '"'; bin2hex(p, &bytes_buf(&swork->merkle_bin)[i * 32], 32); p += 64; *p++ = '"'; } bin2hex(version, swork->header1, 4); bin2hex(nbits, swork->diffbits, 4); ntime_n = htobe32(swork->ntime + timer_elapsed(&swork->tv_received, NULL)); bin2hex(ntime, &ntime_n, 4); p += sprintf(p, "],\"%s\",\"%s\",\"%s\",%s],\"method\":\"mining.notify\",\"id\":null}\n", version, nbits, 
ntime, clean ? "true" : "false"); ssj = malloc(sizeof(*ssj)); *ssj = (struct stratumsrv_job){ .my_job_id = strdup(my_job_id), .pool = pool, .work_restart_id = pool->work_restart_id, .n2size = n2size, .nonce1 = strdup(pool->nonce1), }; timer_set_now(&ssj->tv_prepared); stratum_work_cpy(&ssj->swork, swork); cg_runlock(&pool->data_lock); ssj->swork.data_lock_p = NULL; HASH_ADD_KEYPTR(hh, _ssm_jobs, ssj->my_job_id, strlen(ssj->my_job_id), ssj); if (likely(_ssm_cur_job_work.pool)) clean_work(&_ssm_cur_job_work); _ssm_gen_dummy_work(&_ssm_cur_job_work, ssj, NULL, 0); _ssm_notify_sz = p - buf; assert(_ssm_notify_sz <= bufsz); free(_ssm_notify); _ssm_notify = buf; LL_FOREACH(_ssm_connections, conn) { if (unlikely(!conn->xnonce1_le)) continue; bufferevent_write(conn->bev, _ssm_notify, _ssm_notify_sz); } return true; } static void _ssj_free(struct stratumsrv_job * const ssj) { free(ssj->my_job_id); stratum_work_clean(&ssj->swork); free(ssj->nonce1); free(ssj); } static void stratumsrv_job_pruner() { struct stratumsrv_job *ssj, *tmp_ssj; struct timeval tv_now; timer_set_now(&tv_now); HASH_ITER(hh, _ssm_jobs, ssj, tmp_ssj) { if (timer_elapsed(&ssj->tv_prepared, &tv_now) <= opt_expiry) break; HASH_DEL(_ssm_jobs, ssj); applog(LOG_DEBUG, "SSM: Pruning job_id %s", ssj->my_job_id); _ssj_free(ssj); } } static void stratumsrv_client_close(struct stratumsrv_conn *); static void stratumsrv_conn_close_completion_cb(struct bufferevent *bev, void *p) { struct evbuffer * const output = bufferevent_get_output(bev); if (evbuffer_get_length(output)) // Still have more data to write... return; stratumsrv_client_close(p); } static void stratumsrv_event(struct bufferevent *, short, void *); static void stratumsrv_boot(struct stratumsrv_conn * const conn, const char * const msg) { struct bufferevent * const bev = conn->bev; char buf[58 + strlen(msg)]; int bufsz = sprintf(buf, "{\"params\":[\"%s\"],\"method\":\"client.show_message\",\"id\":null}\n", msg); bufferevent_write(bev, buf, bufsz); bufferevent_setcb(bev, NULL, stratumsrv_conn_close_completion_cb, stratumsrv_event, conn); } static void stratumsrv_boot_all_subscribed(const char * const msg) { struct stratumsrv_conn *conn, *tmp_conn; free(_ssm_notify); _ssm_notify = NULL; // Boot all connections LL_FOREACH_SAFE(_ssm_connections, conn, tmp_conn) { if (!conn->xnonce1_le) continue; stratumsrv_boot(conn, msg); } } static void _stratumsrv_update_notify(evutil_socket_t fd, short what, __maybe_unused void *p) { struct pool *pool = current_pool(); bool clean; if (fd == _ssm_update_notifier[0]) { evtimer_del(ev_notify); notifier_read(_ssm_update_notifier); applog(LOG_DEBUG, "SSM: Update triggered by notifier"); } clean = _ssm_cur_job_work.pool ? 
stale_work(&_ssm_cur_job_work, true) : true; if (clean) { struct stratumsrv_job *ssj, *tmp; applog(LOG_DEBUG, "SSM: Current replacing job stale, pruning all jobs"); HASH_ITER(hh, _ssm_jobs, ssj, tmp) { HASH_DEL(_ssm_jobs, ssj); _ssj_free(ssj); } } else stratumsrv_job_pruner(); if (!pool->stratum_notify) { applog(LOG_WARNING, "SSM: Not using a stratum server upstream!"); if (clean) stratumsrv_boot_all_subscribed("Current upstream pool does not have active stratum"); goto out; } if (!stratumsrv_update_notify_str(pool, clean)) { applog(LOG_WARNING, "SSM: Failed to subdivide upstream stratum notify!"); if (clean) stratumsrv_boot_all_subscribed("Current upstream pool does not have active stratum"); } out: ; struct timeval tv_scantime = { .tv_sec = opt_scantime, }; evtimer_add(ev_notify, &tv_scantime); } static struct proxy_client *_stratumsrv_find_or_create_client(const char *); static struct proxy_client *(*stratumsrv_find_or_create_client)(const char *) = _stratumsrv_find_or_create_client; static struct proxy_client *_stratumsrv_find_or_create_client(const char *user) { struct proxy_client * const client = proxy_find_or_create_client(user); struct cgpu_info *cgpu; struct thr_info *thr; if (!client) return NULL; cgpu = client->cgpu; thr = cgpu->thr[0]; memcpy(thr->work_restart_notifier, _ssm_update_notifier, sizeof(thr->work_restart_notifier)); stratumsrv_find_or_create_client = proxy_find_or_create_client; return client; } static void _stratumsrv_failure(struct bufferevent * const bev, const char * const idstr, const int e, const char * const emsg) { if (!idstr) return; char buf[0x100]; size_t bufsz = snprintf(buf, sizeof(buf), "{\"error\":[%d,\"%s\",null],\"id\":%s,\"result\":null}\n", e, emsg, idstr); bufferevent_write(bev, buf, bufsz); } #define return_stratumsrv_failure(e, emsg) do{ \ _stratumsrv_failure(bev, idstr, e, emsg); \ return; \ }while(0) static void _stratumsrv_success(struct bufferevent * const bev, const char * const idstr) { if (!idstr) return; size_t bufsz = 36 + strlen(idstr); char buf[bufsz]; bufsz = sprintf(buf, "{\"result\":true,\"id\":%s,\"error\":null}\n", idstr); bufferevent_write(bev, buf, bufsz); } static void stratumsrv_mining_subscribe(struct bufferevent *bev, json_t *params, const char *idstr, uint32_t *xnonce1_p) { char buf[90 + strlen(idstr) + (_ssm_client_octets * 2 * 2) + 0x10]; char xnonce1x[(_ssm_client_octets * 2) + 1]; int bufsz; if (!_ssm_notify) { evtimer_del(ev_notify); _stratumsrv_update_notify(-1, 0, NULL); if (!_ssm_notify) return_stratumsrv_failure(20, "No notify set (upstream not stratum?)"); } if (!*xnonce1_p) { uint32_t xnonce1; for (xnonce1 = MAX_CLIENTS; _ssm_xnonce1s[xnonce1]; --xnonce1) if (!xnonce1) return_stratumsrv_failure(20, "Maximum clients already connected"); *xnonce1_p = htole32(xnonce1); } bin2hex(xnonce1x, xnonce1_p, _ssm_client_octets); bufsz = sprintf(buf, "{\"id\":%s,\"result\":[[[\"mining.set_difficulty\",\"x\"],[\"mining.notify\",\"%s\"]],\"%s\",%d],\"error\":null}\n", idstr, xnonce1x, xnonce1x, _ssm_client_xnonce2sz); bufferevent_write(bev, buf, bufsz); bufferevent_write(bev, "{\"params\":[0.9999847412109375],\"id\":null,\"method\":\"mining.set_difficulty\"}\n", 75); bufferevent_write(bev, _ssm_notify, _ssm_notify_sz); } static void stratumsrv_mining_authorize(struct bufferevent *bev, json_t *params, const char *idstr, uint32_t *xnonce1_p) { struct proxy_client * const client = stratumsrv_find_or_create_client(__json_array_string(params, 0)); if (unlikely(!client)) return_stratumsrv_failure(20, "Failed creating new cgpu"); 
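/* Any username is accepted here: it is simply mapped to (or creates) a proxy
 * client cgpu, so authorization only fails if that cgpu cannot be created.
 * Illustrative exchange (worker name and id are examples, whitespace added):
 *   client: {"id": 2, "method": "mining.authorize", "params": ["worker1", "x"]}
 *   server: {"result":true,"id":2,"error":null}
 */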
_stratumsrv_success(bev, idstr); } static void stratumsrv_mining_submit(struct bufferevent *bev, json_t *params, const char *idstr, struct stratumsrv_conn * const conn) { uint32_t * const xnonce1_p = &conn->xnonce1_le; struct work _work, *work; struct stratumsrv_job *ssj; struct proxy_client *client = stratumsrv_find_or_create_client(__json_array_string(params, 0)); struct cgpu_info *cgpu; struct thr_info *thr; const char * const job_id = __json_array_string(params, 1); const char * const extranonce2 = __json_array_string(params, 2); const char * const ntime = __json_array_string(params, 3); const char * const nonce = __json_array_string(params, 4); uint32_t nonce_n; if (unlikely(!client)) return_stratumsrv_failure(20, "Failed creating new cgpu"); if (unlikely(!(job_id && extranonce2 && ntime && nonce))) return_stratumsrv_failure(20, "Couldn't understand parameters"); if (unlikely(strlen(nonce) < 8)) return_stratumsrv_failure(20, "nonce too short"); if (unlikely(strlen(ntime) < 8)) return_stratumsrv_failure(20, "ntime too short"); if (unlikely(strlen(extranonce2) < _ssm_client_xnonce2sz * 2)) return_stratumsrv_failure(20, "extranonce2 too short"); cgpu = client->cgpu; thr = cgpu->thr[0]; // Lookup job_id HASH_FIND_STR(_ssm_jobs, job_id, ssj); if (!ssj) return_stratumsrv_failure(21, "Job not found"); // Generate dummy work work = &_work; _ssm_gen_dummy_work(work, ssj, extranonce2, *xnonce1_p); // Submit nonce hex2bin(&work->data[68], ntime, 4); hex2bin((void*)&nonce_n, nonce, 4); nonce_n = le32toh(nonce_n); if (!submit_nonce(thr, work, nonce_n)) _stratumsrv_failure(bev, idstr, 23, "H-not-zero"); else if (stale_work(work, true)) _stratumsrv_failure(bev, idstr, 21, "stale"); else _stratumsrv_success(bev, idstr); clean_work(work); if (!conn->hashes_done_ext) { struct timeval tv_now, tv_delta; timer_set_now(&tv_now); timersub(&tv_now, &conn->tv_hashes_done, &tv_delta); conn->tv_hashes_done = tv_now; hashes_done(thr, 0x100000000, &tv_delta, NULL); } } static void stratumsrv_mining_hashes_done(struct bufferevent * const bev, json_t * const params, const char * const idstr, struct stratumsrv_conn * const conn) { double f; struct timeval tv_delta; struct cgpu_info *cgpu; struct thr_info *thr; struct proxy_client * const client = stratumsrv_find_or_create_client(__json_array_string(params, 0)); json_t *jduration = json_array_get(params, 1); json_t *jhashcount = json_array_get(params, 2); if (!(json_is_number(jduration) && json_is_number(jhashcount))) return_stratumsrv_failure(20, "mining.hashes_done(String username, Number duration-in-seconds, Number hashcount)"); cgpu = client->cgpu; thr = cgpu->thr[0]; f = json_number_value(jduration); tv_delta.tv_sec = f; tv_delta.tv_usec = (f - tv_delta.tv_sec) * 1e6; f = json_number_value(jhashcount); hashes_done(thr, f, &tv_delta, NULL); conn->hashes_done_ext = true; } static bool stratumsrv_process_line(struct bufferevent * const bev, const char * const ln, void * const p) { struct stratumsrv_conn *conn = p; json_error_t jerr; json_t *json, *params, *j2; const char *method; char *idstr; json = JSON_LOADS(ln, &jerr); if (!json) { applog(LOG_ERR, "SSM: JSON parse error: %s", ln); return false; } method = bfg_json_obj_string(json, "method", NULL); if (!method) { applog(LOG_ERR, "SSM: JSON missing method: %s", ln); return false; } params = json_object_get(json, "params"); if (!params) { applog(LOG_ERR, "SSM: JSON missing params: %s", ln); return false; } applog(LOG_DEBUG, "SSM: RECV: %s", ln); j2 = json_object_get(json, "id"); idstr = (j2 && !json_is_null(j2)) ? 
json_dumps_ANY(j2, 0) : NULL; if (!strcasecmp(method, "mining.submit")) stratumsrv_mining_submit(bev, params, idstr, conn); else if (!strcasecmp(method, "mining.hashes_done")) stratumsrv_mining_hashes_done(bev, params, idstr, conn); else if (!strcasecmp(method, "mining.authorize")) stratumsrv_mining_authorize(bev, params, idstr, &conn->xnonce1_le); else if (!strcasecmp(method, "mining.subscribe")) stratumsrv_mining_subscribe(bev, params, idstr, &conn->xnonce1_le); else _stratumsrv_failure(bev, idstr, -3, "Method not supported"); free(idstr); return true; } static void stratumsrv_client_close(struct stratumsrv_conn * const conn) { struct bufferevent * const bev = conn->bev; bufferevent_free(bev); LL_DELETE(_ssm_connections, conn); free(conn); } static void stratumsrv_read(struct bufferevent *bev, void *p) { struct evbuffer *input = bufferevent_get_input(bev); char *ln; bool rv; while ( (ln = evbuffer_readln(input, NULL, EVBUFFER_EOL_ANY)) ) { rv = stratumsrv_process_line(bev, ln, p); free(ln); if (unlikely(!rv)) { stratumsrv_client_close(p); break; } } } static void stratumsrv_event(struct bufferevent *bev, short events, void *p) { if (events & (BEV_EVENT_EOF | BEV_EVENT_ERROR)) { if (events & BEV_EVENT_ERROR) applog(LOG_ERR, "Error from bufferevent"); if (events & BEV_EVENT_EOF) applog(LOG_DEBUG, "EOF from bufferevent"); stratumsrv_client_close(p); } } static void stratumlistener(struct evconnlistener *listener, evutil_socket_t sock, struct sockaddr *addr, int len, void *p) { struct stratumsrv_conn *conn; struct event_base *evbase = evconnlistener_get_base(listener); struct bufferevent *bev = bufferevent_socket_new(evbase, sock, BEV_OPT_CLOSE_ON_FREE); conn = malloc(sizeof(*conn)); *conn = (struct stratumsrv_conn){ .bev = bev, }; LL_PREPEND(_ssm_connections, conn); bufferevent_setcb(bev, stratumsrv_read, NULL, stratumsrv_event, conn); bufferevent_enable(bev, EV_READ | EV_WRITE); } void stratumsrv_start(); void stratumsrv_change_port() { struct event_base * const evbase = _smm_evbase; if (_smm_listener) evconnlistener_free(_smm_listener); if (!_smm_running) { stratumsrv_start(); return; } struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = INADDR_ANY, .sin_port = htons(stratumsrv_port), }; _smm_listener = evconnlistener_new_bind(evbase, stratumlistener, NULL, ( LEV_OPT_CLOSE_ON_FREE | LEV_OPT_CLOSE_ON_EXEC | LEV_OPT_REUSEABLE ), 0x10, (void*)&sin, sizeof(sin)); } static void *stratumsrv_thread(__maybe_unused void *p) { pthread_detach(pthread_self()); RenameThread("stratumsrv"); for (uint64_t n = MAX_CLIENTS; n; n >>= 8) ++_ssm_client_octets; _ssm_client_xnonce2sz = 2; struct event_base *evbase = event_base_new(); _smm_evbase = evbase; { ev_notify = evtimer_new(evbase, _stratumsrv_update_notify, NULL); _stratumsrv_update_notify(-1, 0, NULL); } { notifier_init(_ssm_update_notifier); struct event *ev_update_notifier = event_new(evbase, _ssm_update_notifier[0], EV_READ | EV_PERSIST, _stratumsrv_update_notify, NULL); event_add(ev_update_notifier, NULL); } stratumsrv_change_port(); event_base_dispatch(evbase); return NULL; } void stratumsrv_start() { _smm_running = true; pthread_t pth; if (unlikely(pthread_create(&pth, NULL, stratumsrv_thread, NULL))) quit(1, "stratumsrv thread create failed"); }bfgminer-bfgminer-3.10.0/driver-twinfury.c000066400000000000000000000334721226556647300205300ustar00rootroot00000000000000/* * Copyright 2013 Andreas Auer * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU 
General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ /* * Twin Bitfury USB miner with two Bitfury ASIC */ #include "config.h" #include "miner.h" #include "logging.h" #include "util.h" #include "libbitfury.h" #include "lowlevel.h" #include "lowl-vcom.h" #include "deviceapi.h" #include "sha2.h" #include "driver-twinfury.h" #include #include #include #include BFG_REGISTER_DRIVER(twinfury_drv) static const uint8_t PREAMBLE[] = { 0xDE, 0xAD, 0xBE, 0xEF }; //------------------------------------------------------------------------------ static bool twinfury_send_command(const int fd, const void * const tx, const uint16_t tx_size) { if(4 != write(fd, PREAMBLE, 4)) { return false; } if(tx_size != write(fd, tx, tx_size)) { return false; } return true; } //------------------------------------------------------------------------------ static int16_t twinfury_wait_response(const int fd, const void * const rx, const uint16_t rx_size) { int16_t rx_len; int timeout = 20; while(timeout > 0) { rx_len = serial_read(fd, rx, rx_size); if(rx_len > 0) break; timeout--; } if(unlikely(timeout == 0)) { return -1; } return rx_len; } //------------------------------------------------------------------------------ static bool twinfury_detect_custom(const char *devpath, struct device_drv *api, struct twinfury_info *info) { int fd = serial_open(devpath, info->baud, 1, true); if(fd < 0) { return false; } char buf[1024]; int16_t len; applog(LOG_DEBUG, "%s: Probing for Twinfury device %s", twinfury_drv.dname, devpath); serial_read(fd, buf, sizeof(buf)); if (!twinfury_send_command(fd, "I", 1)) { applog(LOG_DEBUG, "%s: Failed writing id request to %s", twinfury_drv.dname, devpath); serial_close(fd); return false; } len = twinfury_wait_response(fd, buf, sizeof(buf)); if(len != 29) { applog(LOG_DEBUG, "%s: Not a valid response from device (%d)", twinfury_drv.dname, len); serial_close(fd); return false; } info->id.version = buf[1]; memcpy(info->id.product, buf+2, 16); bin2hex(info->id.serial, buf+18, 11); applog(LOG_DEBUG, "%s: %s: %d, %s %s", twinfury_drv.dname, devpath, info->id.version, info->id.product, info->id.serial); char buf_state[sizeof(struct twinfury_state)+1]; if (!twinfury_send_command(fd, "R", 1)) { applog(LOG_DEBUG, "%s: Failed writing reset request to %s", twinfury_drv.dname, devpath); serial_close(fd); return false; } len = 0; while(len == 0) { len = serial_read(fd, buf, sizeof(buf_state)); cgsleep_ms(100); } serial_close(fd); if(len != 8) { applog(LOG_DEBUG, "%s: %s not responding to reset: %d", twinfury_drv.dname, devpath, len); return false; } if (serial_claim_v(devpath, api)) return false; struct cgpu_info *bigpic; bigpic = calloc(1, sizeof(struct cgpu_info)); bigpic->drv = api; bigpic->device_path = strdup(devpath); bigpic->device_fd = -1; bigpic->threads = 1; bigpic->procs = 2; add_cgpu(bigpic); applog(LOG_INFO, "Found %"PRIpreprv" at %s", bigpic->proc_repr, devpath); applog(LOG_DEBUG, "%"PRIpreprv": Init: baud=%d", bigpic->proc_repr, info->baud); bigpic->device_data = info; return true; } //------------------------------------------------------------------------------ static bool twinfury_detect_one(const char *devpath) { struct twinfury_info *info = calloc(1, sizeof(struct twinfury_info)); if (unlikely(!info)) quit(1, "Failed to malloc bigpicInfo"); info->baud = BPM_BAUD; if (!twinfury_detect_custom(devpath, &twinfury_drv, info)) { free(info); return false; } return true; } 
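/*
 * Editor's note -- a sketch of the probe protocol implied by the detection
 * code above (inferred from the parsing, not from firmware documentation):
 * every command is framed by the 4-byte preamble DE AD BE EF followed by a
 * one-byte opcode.
 *
 *   'I' identify -> 29-byte reply: byte 1 = protocol version,
 *       bytes 2..17 = product string, bytes 18..28 = 11-byte serial
 *       (hex-encoded by the host into twinfury_identity.serial)
 *   'R' reset    -> 8-byte reply (sizeof(struct twinfury_state) + 1)
 *
 * Further opcodes ('W' work, 'Q' query, 'T' temperature, 'V' voltage,
 * 'L' LED) are used by the routines below.
 */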
//------------------------------------------------------------------------------ static bool twinfury_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_product(info, "Twinfury"); } //------------------------------------------------------------------------------ static bool twinfury_lowl_probe(const struct lowlevel_device_info * const info) { return vcom_lowl_probe_wrapper(info, twinfury_detect_one); } //------------------------------------------------------------------------------ static bool twinfury_init(struct thr_info *thr) { struct cgpu_info * const cgpu = thr->cgpu; struct twinfury_info *info = (struct twinfury_info *)cgpu->device_data; struct cgpu_info *proc; int i=0; applog(LOG_DEBUG, "%"PRIpreprv": init", cgpu->proc_repr); for(i=1, proc = cgpu->next_proc; proc; proc = proc->next_proc, i++) { struct twinfury_info *data = calloc(1, sizeof(struct twinfury_info)); proc->device_data = data; data->tx_buffer[0] = 'W'; data->tx_buffer[1] = i; } int fd = serial_open(cgpu->device_path, info->baud, 1, true); if (unlikely(-1 == fd)) { applog(LOG_ERR, "%"PRIpreprv": Failed to open %s", cgpu->proc_repr, cgpu->device_path); return false; } cgpu->device_fd = fd; applog(LOG_INFO, "%"PRIpreprv": Opened %s", cgpu->proc_repr, cgpu->device_path); info->tx_buffer[0] = 'W'; info->tx_buffer[1] = 0x00; if(info->id.version == 2) { char buf[8] = "V\x00\x00"; if(twinfury_send_command(fd, buf, 3)) { if(8 == twinfury_wait_response(fd, buf, 8)) { info->voltage = (buf[4] & 0xFF); info->voltage |= (buf[5] << 8); applog(LOG_DEBUG, "%"PRIpreprv": Voltage: %dmV", cgpu->dev_repr, info->voltage); if(info->voltage < 800 || info->voltage > 950) { info->voltage = 0; } } else { applog(LOG_ERR, "%"PRIpreprv": Failed to get voltage.", cgpu->dev_repr); info->voltage = 0; } } else { applog(LOG_ERR, "%"PRIpreprv": Failed to send voltage request", cgpu->dev_repr); } } timer_set_now(&thr->tv_poll); return true; } //------------------------------------------------------------------------------ static bool twinfury_process_results(struct cgpu_info * const proc) { struct twinfury_info *device = proc->device_data; uint8_t *rx_buffer = device->rx_buffer; int16_t rx_len = device->rx_len; struct work *work = proc->thr[0]->work; if(rx_len == 0 || rx_len == -1) { return false; } if(rx_buffer[3] == 0) { return false; } if(!work) { return true; } uint32_t m7 = *((uint32_t *)&work->data[64]); uint32_t ntime = *((uint32_t *)&work->data[68]); uint32_t nbits = *((uint32_t *)&work->data[72]); int j=0; for(j=0; jproc_repr, (unsigned long)rx_len, rx_buffer[j], state.chip, state.state, state.switched, (unsigned long)nonce); if (bitfury_fudge_nonce(work->midstate, m7, ntime, nbits, &nonce)) submit_nonce(proc->thr[0], work, nonce); else inc_hw_errors(proc->thr[0], work, nonce); } } return true; } //------------------------------------------------------------------------------ int64_t twinfury_job_process_results(struct thr_info *thr, struct work *work, bool stopping) { // Bitfury chips process only 768/1024 of the nonce range return 0xbd000000; } //------------------------------------------------------------------------------ static bool twinfury_job_prepare(struct thr_info *thr, struct work *work, __maybe_unused uint64_t max_nonce) { struct cgpu_info *board = thr->cgpu; struct twinfury_info *info = (struct twinfury_info *)board->device_data; memcpy(&info->tx_buffer[ 2], work->midstate, 32); memcpy(&info->tx_buffer[34], &work->data[64], 12); work->blk.nonce = 0xffffffff; return true; } 
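/*
 * Editor's sketch of the 46-byte 'W' (work) frame assembled by
 * twinfury_init and twinfury_job_prepare above and sent verbatim by
 * twinfury_job_start below; offsets are taken straight from that code,
 * anything beyond them is an assumption:
 *
 *   byte   0      'W' opcode
 *   byte   1      chip index (0 or 1)
 *   bytes  2..33  SHA-256 midstate of the block header
 *   bytes 34..45  final 12 bytes of the 80-byte header
 *                 (merkle-root tail, ntime, nbits)
 *
 * Only the midstate plus the last 12 header bytes are needed because the
 * first 64 header bytes are already folded into the midstate, leaving the
 * chip to iterate just the nonce field.
 */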
//------------------------------------------------------------------------------ static void twinfury_poll(struct thr_info *thr) { struct cgpu_info * const dev = thr->cgpu; struct twinfury_info *info = dev->device_data; uint8_t n_chips = 0; uint8_t buffer[2] = { 'Q', 0 }; uint8_t response[8]; bool flashed = false; if(info->send_voltage) { char buf[8] = "V"; buf[1] = info->voltage & 0xFF; buf[2] = (info->voltage >> 8) & 0xFF; if(!twinfury_send_command(dev->device_fd, buf, 3)) applog(LOG_ERR, "%s: Failed supply voltage", dev->dev_repr); else if(8 != twinfury_wait_response(dev->device_fd, buf, 8)) { applog(LOG_ERR, "%s: Waiting for response timed out (Supply voltage)", dev->dev_repr); } else { info->voltage = (buf[4] & 0xFF); info->voltage |= (buf[5] << 8); } info->send_voltage = false; } for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc, ++n_chips) { struct twinfury_info *info = (struct twinfury_info *)proc->device_data; if (proc->flash_led) { if (flashed) proc->flash_led = 0; else { char buf[] = "L"; if(!twinfury_send_command(dev->device_fd, buf, 1)) applog(LOG_ERR, "%s: Failed writing flash LED", dev->dev_repr); else if(1 != twinfury_wait_response(dev->device_fd, buf, 1)) applog(LOG_ERR, "%s: Waiting for response timed out (Flash LED)", dev->dev_repr); else { flashed = true; proc->flash_led = 0; } } } buffer[1] = n_chips; if(!twinfury_send_command(dev->device_fd, buffer, 2)) { applog(LOG_ERR, "%"PRIpreprv": Failed writing work task", proc->proc_repr); dev_error(dev, REASON_DEV_COMMS_ERROR); return; } info->rx_len = twinfury_wait_response(dev->device_fd, info->rx_buffer, sizeof(info->rx_buffer)); if(unlikely(info->rx_len == -1)) { applog(LOG_ERR, "%"PRIpreprv": Query timeout", proc->proc_repr); } if(twinfury_process_results(proc) == true) { struct thr_info *proc_thr = proc->thr[0]; mt_job_transition(proc_thr); // TODO: Delay morework until right before it's needed timer_set_now(&proc_thr->tv_morework); job_start_complete(proc_thr); } } buffer[0] = 'T'; if(twinfury_send_command(dev->device_fd, buffer, 1)) { if(8 == twinfury_wait_response(dev->device_fd, response, 8)) { if(response[0] == buffer[0]) { const float temp = ((uint16_t)response[4] | (uint16_t)(response[5] << 8)) / 10.0; if (opt_dev_protocol && opt_debug) { char hex[93]; bin2hex(hex, response, 8); applog(LOG_DEBUG, "%"PRIpreprv": TEMP: %s", dev->dev_repr, hex); } for (struct cgpu_info *proc = dev; proc; proc = proc->next_proc) proc->temp = temp; applog(LOG_DEBUG, "%"PRIpreprv": Temperature: %f", dev->dev_repr, temp); } } else { applog(LOG_DEBUG, "%"PRIpreprv": No temperature response", dev->dev_repr); } } timer_set_delay_from_now(&thr->tv_poll, 250000); } //------------------------------------------------------------------------------ static void twinfury_job_start(struct thr_info *thr) { struct cgpu_info *board = thr->cgpu; struct twinfury_info *info = (struct twinfury_info *)board->device_data; int device_fd = thr->cgpu->device->device_fd; uint8_t buffer[8]; int16_t len; if (opt_dev_protocol && opt_debug) { char hex[93]; bin2hex(hex, info->tx_buffer, 46); applog(LOG_DEBUG, "%"PRIpreprv": SEND: %s", board->proc_repr, hex); } if(!twinfury_send_command(device_fd, info->tx_buffer, 46)) { applog(LOG_ERR, "%"PRIpreprv": Failed writing work task", board->proc_repr); dev_error(board, REASON_DEV_COMMS_ERROR); job_start_abort(thr, true); return; } len = twinfury_wait_response(device_fd, buffer, 8); if(unlikely(len == -1)) { applog(LOG_ERR, "%"PRIpreprv": Work send timeout.", board->proc_repr); } } 
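/*
 * Editor's note on the status replies parsed above (layout inferred from
 * the parsing code only): both the 'T' (temperature) and 'V' (supply
 * voltage) commands return an 8-byte frame, with the opcode echoed in
 * byte 0 (checked explicitly for 'T') and a little-endian uint16 carried
 * in bytes 4..5.  For 'T' the value is tenths of a degree Celsius
 * (e.g. 545 -> 54.5 C); for 'V' it is millivolts, matching the
 * 800..950 mV range enforced at init and by the TUI voltage handler below.
 */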
//------------------------------------------------------------------------------ static void twinfury_shutdown(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; serial_close(cgpu->device_fd); } //------------------------------------------------------------------------------ static bool twinfury_identify(struct cgpu_info *cgpu) { cgpu->flash_led = 1; return true; } #ifdef HAVE_CURSES void twinfury_tui_wlogprint_choices(struct cgpu_info * const proc) { struct twinfury_info * const state = proc->device->device_data; if(state->id.version > 1) { wlogprint("[V]oltage "); } } const char *twinfury_tui_handle_choice(struct cgpu_info * const proc, const int input) { struct twinfury_info * const state = proc->device->device_data; if(state->id.version > 1) { switch (input) { case 'v': case 'V': { const int val = curses_int("Set supply voltage (range 800mV-950mV; slow to fast)"); if (val < 800 || val > 950) return "Invalid supply voltage value\n"; state->voltage = val; state->send_voltage = true; return "Supply voltage changing\n"; } } } return NULL; } void twinfury_wlogprint_status(struct cgpu_info * const proc) { const struct twinfury_info * const state = proc->device->device_data; if(state->id.version > 1) wlogprint("Supply voltage: %dmV\n", state->voltage); } #endif //------------------------------------------------------------------------------ struct device_drv twinfury_drv = { //lowercase driver name so --scan pattern matching works .dname = "twinfury", .name = "TBF", .probe_priority = -111, .lowl_match = twinfury_lowl_match, .lowl_probe = twinfury_lowl_probe, .identify_device = twinfury_identify, .thread_init = twinfury_init, .minerloop = minerloop_async, .job_prepare = twinfury_job_prepare, .job_start = twinfury_job_start, .poll = twinfury_poll, .job_process_results = twinfury_job_process_results, .thread_shutdown = twinfury_shutdown, #ifdef HAVE_CURSES .proc_wlogprint_status = twinfury_wlogprint_status, .proc_tui_wlogprint_choices = twinfury_tui_wlogprint_choices, .proc_tui_handle_choice = twinfury_tui_handle_choice, #endif }; bfgminer-bfgminer-3.10.0/driver-twinfury.h000066400000000000000000000011451226556647300205250ustar00rootroot00000000000000#ifndef BFG_DRIVER_TWINFURY_H #define BFG_DRIVER_TWINFURY_H #define BPM_BAUD 115200 #define NUM_BITFURY_CHIPS 2 struct twinfury_identity { uint8_t version; char product[16]; char serial[23]; } __attribute__((packed)); struct twinfury_state { uint8_t chip; uint8_t state; uint8_t switched; uint32_t nonce; } __attribute__((packed)); struct twinfury_info { uint32_t baud; struct work *prev_work; struct work *work; bool work_sent; struct twinfury_identity id; uint8_t tx_buffer[46]; uint8_t rx_buffer[1024]; int16_t rx_len; uint32_t voltage; bool send_voltage; }; #endif bfgminer-bfgminer-3.10.0/driver-x6500.c000066400000000000000000000537471226556647300174320ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include "binloader.h" #include "compat.h" #include "deviceapi.h" #include "dynclock.h" #include "jtag.h" #include "logging.h" #include "miner.h" #include "ft232r.h" #include "lowlevel.h" #include "lowl-usb.h" #define X6500_USB_PRODUCT "X6500 FPGA Miner" #define X6500_BITSTREAM_FILENAME "fpgaminer_x6500-overclocker-0402.bit" // NOTE: X6500_BITSTREAM_USERID is bitflipped #define X6500_BITSTREAM_USERID "\x40\x20\x24\x42" #define X6500_MINIMUM_CLOCK 2 #define X6500_DEFAULT_CLOCK 190 #define X6500_MAXIMUM_CLOCK 250 BFG_REGISTER_DRIVER(x6500_api) #define fromlebytes(ca, j) (ca[j] | (((uint16_t)ca[j+1])<<8) | (((uint32_t)ca[j+2])<<16) | (((uint32_t)ca[j+3])<<24)) static void int2bits(uint32_t n, uint8_t *b, uint8_t bits) { uint8_t i; for (i = (bits + 7) / 8; i > 0; ) b[--i] = 0; for (i = 0; i < bits; ++i) { if (n & 1) b[i/8] |= 0x80 >> (i % 8); n >>= 1; } } static uint32_t bits2int(uint8_t *b, uint8_t bits) { uint32_t n, i; n = 0; for (i = 0; i < bits; ++i) if (b[i/8] & (0x80 >> (i % 8))) n |= 1<> (i % 8))) ? 1 : 0; if (checksum) b[i/8] |= 0x80 >> (i % 8); } static void x6500_jtag_set(struct jtag_port *jp, uint8_t pinoffset) { jp->tck = pinoffset << 3; jp->tms = pinoffset << 2; jp->tdi = pinoffset << 1; jp->tdo = pinoffset << 0; jp->ignored = ~(jp->tdo | jp->tdi | jp->tms | jp->tck); } static uint32_t x6500_get_register(struct jtag_port *jp, uint8_t addr); static void x6500_set_register(struct jtag_port *jp, uint8_t addr, uint32_t nv) { uint8_t buf[38]; retry: jtag_write(jp, JTAG_REG_IR, "\x40", 6); int2bits(nv, &buf[0], 32); int2bits(addr, &buf[4], 4); buf[4] |= 8; checksum(buf, 37); jtag_write(jp, JTAG_REG_DR, buf, 38); jtag_run(jp); #ifdef DEBUG_X6500_SET_REGISTER if (x6500_get_register(jp, addr) != nv) #else if (0) #endif { applog(LOG_WARNING, "x6500_set_register failed %x=%08x", addr, nv); goto retry; } } static uint32_t x6500_get_register(struct jtag_port *jp, uint8_t addr) { uint8_t buf[4] = {0}; jtag_write(jp, JTAG_REG_IR, "\x40", 6); int2bits(addr, &buf[0], 4); checksum(buf, 5); jtag_write(jp, JTAG_REG_DR, buf, 6); jtag_read (jp, JTAG_REG_DR, buf, 32); jtag_reset(jp); return bits2int(buf, 32); } static bool x6500_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_lowlproduct(info, &lowl_ft232r, X6500_USB_PRODUCT); } static bool x6500_lowl_probe(const struct lowlevel_device_info * const info) { const char * const product = info->product; const char * const serial = info->serial; if (info->lowl != &lowl_ft232r) { if (info->lowl != &lowl_usb) applog(LOG_DEBUG, "%s: Matched \"%s\" serial \"%s\", but lowlevel driver is not ft232r!", __func__, product, serial); return false; } libusb_device * const dev = info->lowl_data; if (bfg_claim_libusb(&x6500_api, true, dev)) return false; struct cgpu_info *x6500; x6500 = calloc(1, sizeof(*x6500)); x6500->drv = &x6500_api; x6500->device_path = strdup(serial); x6500->deven = DEV_ENABLED; x6500->threads = 1; x6500->procs = 2; x6500->name = strdup(product); x6500->cutofftemp = 85; x6500->device_data = lowlevel_ref(info); cgpu_copy_libusb_strings(x6500, dev); return add_cgpu(x6500); } static bool x6500_prepare(struct thr_info *thr) { struct cgpu_info *x6500 = thr->cgpu; if (x6500->proc_id) return true; struct ft232r_device_handle *ftdi = ft232r_open(x6500->device_data); lowlevel_devinfo_free(x6500->device_data); x6500->device_ft232r = NULL; if (!ftdi) return false; if (!ft232r_set_bitmode(ftdi, 0xee, 4)) return false; if (!ft232r_purge_buffers(ftdi, 
FTDI_PURGE_BOTH)) return false; x6500->device_ft232r = ftdi; struct jtag_port_a *jtag_a; unsigned char *pdone = calloc(1, sizeof(*jtag_a) + 1); *pdone = 101; jtag_a = (void*)(pdone + 1); jtag_a->ftdi = ftdi; x6500->device_data = jtag_a; for (struct cgpu_info *slave = x6500->next_proc; slave; slave = slave->next_proc) { slave->device_ft232r = x6500->device_ft232r; slave->device_data = x6500->device_data; } return true; } struct x6500_fpga_data { struct jtag_port jtag; struct timeval tv_hashstart; int64_t hashes_left; struct dclk_data dclk; uint8_t freqMaxMaxM; // Time the clock was last reduced due to temperature struct timeval tv_last_cutoff_reduced; uint32_t prepwork_last_register; }; #define bailout2(...) do { \ applog(__VA_ARGS__); \ return false; \ } while(0) static bool x6500_fpga_upload_bitstream(struct cgpu_info *x6500, struct jtag_port *jp1) { char buf[0x100]; unsigned long len, flen; unsigned char *pdone = (unsigned char*)x6500->device_data - 1; struct ft232r_device_handle *ftdi = jp1->a->ftdi; FILE *f = open_xilinx_bitstream(x6500->drv->dname, x6500->dev_repr, X6500_BITSTREAM_FILENAME, &len); if (!f) return false; flen = len; applog(LOG_WARNING, "%s: Programming %s...", x6500->dev_repr, x6500->device_path); x6500->status = LIFE_INIT2; // "Magic" jtag_port configured to access both FPGAs concurrently struct jtag_port jpt = { .a = jp1->a, }; struct jtag_port *jp = &jpt; uint8_t i, j; x6500_jtag_set(jp, 0x11); // Need to reset here despite previous FPGA state, since we are programming all at once jtag_reset(jp); jtag_write(jp, JTAG_REG_IR, "\xd0", 6); // JPROGRAM // Poll each FPGA status individually since they might not be ready at the same time for (j = 0; j < 2; ++j) { x6500_jtag_set(jp, j ? 0x10 : 1); do { i = 0xd0; // Re-set JPROGRAM while reading status jtag_read(jp, JTAG_REG_IR, &i, 6); } while (i & 8); applog(LOG_DEBUG, "%s%c: JPROGRAM ready", x6500->dev_repr, 'a' + j); } x6500_jtag_set(jp, 0x11); jtag_write(jp, JTAG_REG_IR, "\xa0", 6); // CFG_IN cgsleep_ms(1000); if (fread(buf, 32, 1, f) != 1) bailout2(LOG_ERR, "%s: File underrun programming %s (%lu bytes left)", x6500->dev_repr, x6500->device_path, len); jtag_swrite(jp, JTAG_REG_DR, buf, 256); len -= 32; // Put ft232r chip in asynchronous bitbang mode so we don't need to read back tdo // This takes upload time down from about an hour to about 3 minutes if (!ft232r_set_bitmode(ftdi, 0xee, 1)) return false; if (!ft232r_purge_buffers(ftdi, FTDI_PURGE_BOTH)) return false; jp->a->bufread = 0; jp->a->async = true; ssize_t buflen; char nextstatus = 25; while (len) { buflen = len < 32 ? len : 32; if (fread(buf, buflen, 1, f) != 1) bailout2(LOG_ERR, "%s: File underrun programming %s (%lu bytes left)", x6500->dev_repr, x6500->device_path, len); jtag_swrite_more(jp, buf, buflen * 8, len == (unsigned long)buflen); *pdone = 100 - ((len * 100) / flen); if (*pdone >= nextstatus) { nextstatus += 25; applog(LOG_WARNING, "%s: Programming %s... 
%d%% complete...", x6500->dev_repr, x6500->device_path, *pdone); } len -= buflen; } // Switch back to synchronous bitbang mode if (!ft232r_set_bitmode(ftdi, 0xee, 4)) return false; if (!ft232r_purge_buffers(ftdi, FTDI_PURGE_BOTH)) return false; jp->a->bufread = 0; jp->a->async = false; jp->a->bufread = 0; jtag_write(jp, JTAG_REG_IR, "\x30", 6); // JSTART for (i=0; i<16; ++i) jtag_run(jp); i = 0xff; // BYPASS jtag_read(jp, JTAG_REG_IR, &i, 6); if (!(i & 4)) return false; applog(LOG_WARNING, "%s: Done programming %s", x6500->dev_repr, x6500->device_path); *pdone = 101; return true; } static bool x6500_change_clock(struct thr_info *thr, int multiplier) { struct x6500_fpga_data *fpga = thr->cgpu_data; struct jtag_port *jp = &fpga->jtag; x6500_set_register(jp, 0xD, multiplier * 2); ft232r_flush(jp->a->ftdi); fpga->dclk.freqM = multiplier; return true; } static bool x6500_dclk_change_clock(struct thr_info *thr, int multiplier) { struct cgpu_info *x6500 = thr->cgpu; struct x6500_fpga_data *fpga = thr->cgpu_data; uint8_t oldFreq = fpga->dclk.freqM; if (!x6500_change_clock(thr, multiplier)) { return false; } dclk_msg_freqchange(x6500->proc_repr, oldFreq * 2, fpga->dclk.freqM * 2, NULL); return true; } static bool x6500_thread_init(struct thr_info *thr) { struct cgpu_info *x6500 = thr->cgpu; struct ft232r_device_handle *ftdi = x6500->device_ft232r; cgpu_setup_control_requests(x6500); // This works because x6500_thread_init is only called for the first processor now that they're all using the same thread for ( ; x6500; x6500 = x6500->next_proc) { thr = x6500->thr[0]; struct x6500_fpga_data *fpga; struct jtag_port *jp; int fpgaid = x6500->proc_id; uint8_t pinoffset = fpgaid ? 0x10 : 1; unsigned char buf[4] = {0}; int i; if (!ftdi) return false; fpga = calloc(1, sizeof(*fpga)); jp = &fpga->jtag; jp->a = x6500->device_data; x6500_jtag_set(jp, pinoffset); thr->cgpu_data = fpga; x6500->status = LIFE_INIT2; if (!jtag_reset(jp)) { applog(LOG_ERR, "%s: JTAG reset failed", x6500->dev_repr); return false; } i = jtag_detect(jp); if (i != 1) { applog(LOG_ERR, "%s: JTAG detect returned %d", x6500->dev_repr, i); return false; } if (!(1 && jtag_write(jp, JTAG_REG_IR, "\x10", 6) && jtag_read (jp, JTAG_REG_DR, buf, 32) && jtag_reset(jp) )) { applog(LOG_ERR, "%s: JTAG error reading user code", x6500->dev_repr); return false; } if (memcmp(buf, X6500_BITSTREAM_USERID, 4)) { applog(LOG_ERR, "%"PRIprepr": FPGA not programmed", x6500->proc_repr); if (!x6500_fpga_upload_bitstream(x6500, jp)) return false; } else if (opt_force_dev_init && x6500 == x6500->device) { applog(LOG_DEBUG, "%"PRIprepr": FPGA is already programmed, but --force-dev-init is set", x6500->proc_repr); if (!x6500_fpga_upload_bitstream(x6500, jp)) return false; } else applog(LOG_DEBUG, "%s"PRIprepr": FPGA is already programmed :)", x6500->proc_repr); dclk_prepare(&fpga->dclk); fpga->dclk.freqMinM = X6500_MINIMUM_CLOCK / 2; x6500_change_clock(thr, X6500_DEFAULT_CLOCK / 2); for (i = 0; 0xffffffff != x6500_get_register(jp, 0xE); ++i) {} if (i) applog(LOG_WARNING, "%"PRIprepr": Flushed %d nonces from buffer at init", x6500->proc_repr, i); fpga->dclk.minGoodSamples = 3; fpga->freqMaxMaxM = fpga->dclk.freqMaxM = X6500_MAXIMUM_CLOCK / 2; fpga->dclk.freqMDefault = fpga->dclk.freqM; applog(LOG_WARNING, "%"PRIprepr": Frequency set to %u MHz (range: %u-%u)", x6500->proc_repr, fpga->dclk.freqM * 2, X6500_MINIMUM_CLOCK, fpga->dclk.freqMaxM * 2); } return true; } static void x6500_get_temperature(struct cgpu_info *x6500) { struct x6500_fpga_data *fpga = 
x6500->thr[0]->cgpu_data; struct jtag_port *jp = &fpga->jtag; struct ft232r_device_handle *ftdi = jp->a->ftdi; int i, code[2]; bool sio[2]; code[0] = 0; code[1] = 0; ft232r_flush(ftdi); if (!(ft232r_set_cbus_bits(ftdi, false, true))) return; if (!(ft232r_set_cbus_bits(ftdi, true, true))) return; if (!(ft232r_set_cbus_bits(ftdi, false, true))) return; if (!(ft232r_set_cbus_bits(ftdi, true, true))) return; if (!(ft232r_set_cbus_bits(ftdi, false, false))) return; for (i = 16; i--; ) { if (ft232r_set_cbus_bits(ftdi, true, false)) { if (!(ft232r_get_cbus_bits(ftdi, &sio[0], &sio[1]))) { return; } } else { return; } code[0] |= sio[0] << i; code[1] |= sio[1] << i; if (!ft232r_set_cbus_bits(ftdi, false, false)) { return; } } if (!(ft232r_set_cbus_bits(ftdi, false, true))) { return; } if (!(ft232r_set_cbus_bits(ftdi, true, true))) { return; } if (!(ft232r_set_cbus_bits(ftdi, false, true))) { return; } if (!ft232r_set_bitmode(ftdi, 0xee, 4)) { return; } ft232r_purge_buffers(jp->a->ftdi, FTDI_PURGE_BOTH); jp->a->bufread = 0; x6500 = x6500->device; for (i = 0; i < 2; ++i, x6500 = x6500->next_proc) { struct thr_info *thr = x6500->thr[0]; fpga = thr->cgpu_data; if (!fpga) continue; if (code[i] == 0xffff || !code[i]) { x6500->temp = 0; continue; } if ((code[i] >> 15) & 1) code[i] -= 0x10000; x6500->temp = (float)(code[i] >> 2) * 0.03125f; applog(LOG_DEBUG,"x6500_get_temperature: fpga[%d]->temp=%.1fC", i, x6500->temp); int temperature = round(x6500->temp); if (temperature > x6500->targettemp + opt_hysteresis) { struct timeval now; cgtime(&now); if (timer_elapsed(&fpga->tv_last_cutoff_reduced, &now)) { fpga->tv_last_cutoff_reduced = now; int oldFreq = fpga->dclk.freqM; if (x6500_change_clock(thr, oldFreq - 1)) applog(LOG_NOTICE, "%"PRIprepr": Frequency dropped from %u to %u MHz (temp: %.1fC)", x6500->proc_repr, oldFreq * 2, fpga->dclk.freqM * 2, x6500->temp ); fpga->dclk.freqMaxM = fpga->dclk.freqM; } } else if (fpga->dclk.freqMaxM < fpga->freqMaxMaxM && temperature < x6500->targettemp) { if (temperature < x6500->targettemp - opt_hysteresis) { fpga->dclk.freqMaxM = fpga->freqMaxMaxM; } else if (fpga->dclk.freqM == fpga->dclk.freqMaxM) { ++fpga->dclk.freqMaxM; } } } } static bool x6500_all_idle(struct cgpu_info *any_proc) { for (struct cgpu_info *proc = any_proc->device; proc; proc = proc->next_proc) if (proc->thr[0]->tv_poll.tv_sec != -1 || proc->deven == DEV_ENABLED) return false; return true; } static bool x6500_get_stats(struct cgpu_info *x6500) { if (x6500_all_idle(x6500)) { struct cgpu_info *cgpu = x6500->device; // Getting temperature more efficiently while running cgpu_request_control(cgpu); x6500_get_temperature(x6500); cgpu_release_control(cgpu); } return true; } static bool get_x6500_upload_percent(char *buf, size_t bufsz, struct cgpu_info *x6500, __maybe_unused bool per_processor) { unsigned char pdone = *((unsigned char*)x6500->device_data - 1); if (pdone != 101) { tailsprintf(buf, bufsz, "%3d%% ", pdone); return true; } return false; } static struct api_data* get_x6500_api_extra_device_status(struct cgpu_info *x6500) { struct api_data *root = NULL; struct thr_info *thr = x6500->thr[0]; struct x6500_fpga_data *fpga = thr->cgpu_data; double d; d = (double)fpga->dclk.freqM * 2; root = api_add_freq(root, "Frequency", &d, true); d = (double)fpga->dclk.freqMaxM * 2; root = api_add_freq(root, "Cool Max Frequency", &d, true); d = (double)fpga->freqMaxMaxM * 2; root = api_add_freq(root, "Max Frequency", &d, true); return root; } static bool x6500_job_prepare(struct thr_info *thr, struct work *work, 
__maybe_unused uint64_t max_nonce) { struct cgpu_info *x6500 = thr->cgpu; struct x6500_fpga_data *fpga = thr->cgpu_data; struct jtag_port *jp = &fpga->jtag; for (int i = 1, j = 0; i < 9; ++i, j += 4) x6500_set_register(jp, i, fromlebytes(work->midstate, j)); for (int i = 9, j = 64; i < 11; ++i, j += 4) x6500_set_register(jp, i, fromlebytes(work->data, j)); x6500_get_temperature(x6500); ft232r_flush(jp->a->ftdi); fpga->prepwork_last_register = fromlebytes(work->data, 72); work->blk.nonce = 0xffffffff; return true; } static int64_t calc_hashes(struct thr_info *, struct timeval *); static void x6500_job_start(struct thr_info *thr) { struct cgpu_info *x6500 = thr->cgpu; struct x6500_fpga_data *fpga = thr->cgpu_data; struct jtag_port *jp = &fpga->jtag; struct timeval tv_now; if (thr->prev_work) { dclk_preUpdate(&fpga->dclk); dclk_updateFreq(&fpga->dclk, x6500_dclk_change_clock, thr); } x6500_set_register(jp, 11, fpga->prepwork_last_register); ft232r_flush(jp->a->ftdi); timer_set_now(&tv_now); if (!thr->prev_work) fpga->tv_hashstart = tv_now; else if (thr->prev_work != thr->work) calc_hashes(thr, &tv_now); fpga->hashes_left = 0x100000000; mt_job_transition(thr); if (opt_debug) { char xdata[161]; bin2hex(xdata, thr->work->data, 80); applog(LOG_DEBUG, "%"PRIprepr": Started work: %s", x6500->proc_repr, xdata); } uint32_t usecs = 0x80000000 / fpga->dclk.freqM; usecs -= 1000000; timer_set_delay(&thr->tv_morework, &tv_now, usecs); timer_set_delay(&thr->tv_poll, &tv_now, 10000); job_start_complete(thr); } static int64_t calc_hashes(struct thr_info *thr, struct timeval *tv_now) { struct x6500_fpga_data *fpga = thr->cgpu_data; struct timeval tv_delta; int64_t hashes, hashes_left; timersub(tv_now, &fpga->tv_hashstart, &tv_delta); hashes = (((int64_t)tv_delta.tv_sec * 1000000) + tv_delta.tv_usec) * fpga->dclk.freqM * 2; hashes_left = fpga->hashes_left; if (unlikely(hashes > hashes_left)) hashes = hashes_left; fpga->hashes_left -= hashes; hashes_done(thr, hashes, &tv_delta, NULL); fpga->tv_hashstart = *tv_now; return hashes; } static int64_t x6500_process_results(struct thr_info *thr, struct work *work) { struct cgpu_info *x6500 = thr->cgpu; struct x6500_fpga_data *fpga = thr->cgpu_data; struct jtag_port *jtag = &fpga->jtag; struct timeval tv_now; int64_t hashes; uint32_t nonce; bool bad; while (1) { timer_set_now(&tv_now); nonce = x6500_get_register(jtag, 0xE); if (nonce != 0xffffffff) { bad = !(work && test_nonce(work, nonce, false)); if (!bad) { submit_nonce(thr, work, nonce); applog(LOG_DEBUG, "%"PRIprepr": Nonce for current work: %08lx", x6500->proc_repr, (unsigned long)nonce); dclk_gotNonces(&fpga->dclk); } else if (likely(thr->prev_work) && test_nonce(thr->prev_work, nonce, false)) { submit_nonce(thr, thr->prev_work, nonce); applog(LOG_DEBUG, "%"PRIprepr": Nonce for PREVIOUS work: %08lx", x6500->proc_repr, (unsigned long)nonce); } else { inc_hw_errors(thr, work, nonce); dclk_gotNonces(&fpga->dclk); dclk_errorCount(&fpga->dclk, 1.); } // Keep reading nonce buffer until it's empty // This is necessary to avoid getting hw errors from Freq B after we've moved on to Freq A continue; } hashes = calc_hashes(thr, &tv_now); break; } return hashes; } static void x6500_fpga_poll(struct thr_info *thr) { struct x6500_fpga_data *fpga = thr->cgpu_data; x6500_process_results(thr, thr->work); if (unlikely(!fpga->hashes_left)) { mt_disable_start(thr); thr->tv_poll.tv_sec = -1; } else timer_set_delay_from_now(&thr->tv_poll, 10000); } static void x6500_user_set_clock(struct cgpu_info *cgpu, const int val) { struct 
thr_info * const thr = cgpu->thr[0]; struct x6500_fpga_data *fpga = thr->cgpu_data; const int multiplier = val / 2; fpga->dclk.freqMDefault = multiplier; } static void x6500_user_set_max_clock(struct cgpu_info *cgpu, const int val) { struct thr_info * const thr = cgpu->thr[0]; struct x6500_fpga_data *fpga = thr->cgpu_data; const int multiplier = val / 2; fpga->freqMaxMaxM = fpga->dclk.freqMaxM = multiplier; } static char *x6500_set_device(struct cgpu_info *cgpu, char *option, char *setting, char *replybuf) { int val; if (strcasecmp(option, "help") == 0) { sprintf(replybuf, "clock: range %d-%d and a multiple of 2\nmaxclock: default %d, range %d-%d and a multiple of 2", X6500_MINIMUM_CLOCK, X6500_MAXIMUM_CLOCK, X6500_MAXIMUM_CLOCK, X6500_MINIMUM_CLOCK, X6500_MAXIMUM_CLOCK); return replybuf; } if (strcasecmp(option, "clock") == 0) { if (!setting || !*setting) { sprintf(replybuf, "missing clock setting"); return replybuf; } val = atoi(setting); if (val < X6500_MINIMUM_CLOCK || val > X6500_MAXIMUM_CLOCK || (val & 1) != 0) { sprintf(replybuf, "invalid clock: '%s' valid range %d-%d and a multiple of 2", setting, X6500_MINIMUM_CLOCK, X6500_MAXIMUM_CLOCK); return replybuf; } x6500_user_set_clock(cgpu, val); return NULL; } if (strcasecmp(option, "maxclock") == 0) { if (!setting || !*setting) { sprintf(replybuf, "missing maxclock setting"); return replybuf; } val = atoi(setting); if (val < X6500_MINIMUM_CLOCK || val > X6500_MAXIMUM_CLOCK || (val & 1) != 0) { sprintf(replybuf, "invalid maxclock: '%s' valid range %d-%d and a multiple of 2", setting, X6500_MINIMUM_CLOCK, X6500_MAXIMUM_CLOCK); return replybuf; } x6500_user_set_max_clock(cgpu, val); applog(LOG_NOTICE, "%"PRIpreprv": Maximum frequency reset to %u MHz", cgpu->proc_repr, val ); return NULL; } sprintf(replybuf, "Unknown option: %s", option); return replybuf; } #ifdef HAVE_CURSES static void x6500_tui_wlogprint_choices(struct cgpu_info *cgpu) { wlogprint("[C]lock speed "); } static const char *x6500_tui_handle_choice(struct cgpu_info *cgpu, int input) { static char buf[0x100]; // Static for replies switch (input) { case 'c': case 'C': { int val; char *intvar; sprintf(buf, "Set clock speed (range %d-%d, multiple of 2)", X6500_MINIMUM_CLOCK, X6500_MAXIMUM_CLOCK); intvar = curses_input(buf); if (!intvar) return "Invalid clock speed\n"; val = atoi(intvar); free(intvar); if (val < X6500_MINIMUM_CLOCK || val > X6500_MAXIMUM_CLOCK || (val & 1) != 0) return "Invalid clock speed\n"; x6500_user_set_clock(cgpu, val); return "Clock speed changed\n"; } } return NULL; } static void x6500_wlogprint_status(struct cgpu_info *cgpu) { struct x6500_fpga_data *fpga = cgpu->thr[0]->cgpu_data; wlogprint("Clock speed: %d\n", (int)(fpga->dclk.freqM * 2)); } #endif struct device_drv x6500_api = { .dname = "x6500", .name = "XBS", .lowl_match = x6500_lowl_match, .lowl_probe = x6500_lowl_probe, .thread_prepare = x6500_prepare, .thread_init = x6500_thread_init, .get_stats = x6500_get_stats, .override_statline_temp2 = get_x6500_upload_percent, .get_api_extra_device_status = get_x6500_api_extra_device_status, .set_device = x6500_set_device, #ifdef HAVE_CURSES .proc_wlogprint_status = x6500_wlogprint_status, .proc_tui_wlogprint_choices = x6500_tui_wlogprint_choices, .proc_tui_handle_choice = x6500_tui_handle_choice, #endif .poll = x6500_fpga_poll, .minerloop = minerloop_async, .job_prepare = x6500_job_prepare, .job_start = x6500_job_start, // .thread_shutdown = x6500_fpga_shutdown, }; 
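/*
 * Editor's worked example for the job timer in x6500_job_start above:
 * each FPGA hashes at (freqM * 2) MHz, i.e. (freqM * 2) million nonces
 * per second, so sweeping the full 2^32 nonce range takes
 *
 *     2^32 / (freqM * 2 * 10^6) s  =  2^31 / freqM microseconds,
 *
 * which is the 0x80000000 / freqM expression used for tv_morework; the
 * 1,000,000 us subtracted afterwards requests new work about a second
 * before the range is exhausted.  At the default 190 MHz (freqM = 95)
 * that works out to roughly 22.6 seconds per work item.
 */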
bfgminer-bfgminer-3.10.0/driver-ztex.c000066400000000000000000000261671226556647300176360ustar00rootroot00000000000000/* * Copyright 2012 nelisky * Copyright 2012-2013 Luke Dashjr * Copyright 2012-2013 Denis Ahrens * Copyright 2012 Xiangfu * * This work is based upon the Java SDK provided by ztex which is * Copyright (C) 2009-2011 ZTEX GmbH. * http://www.ztex.de * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include "miner.h" #include #include #include "deviceapi.h" #include "dynclock.h" #include "libztex.h" #include "lowlevel.h" #include "lowl-usb.h" #include "util.h" #define GOLDEN_BACKLOG 5 BFG_REGISTER_DRIVER(ztex_drv) // Forward declarations static void ztex_disable(struct thr_info* thr); static bool ztex_prepare(struct thr_info *thr); static void ztex_selectFpga(struct libztex_device* ztex, int16_t fpgaNum) { if (ztex->root->numberOfFpgas > 1) { if (ztex->root->selectedFpga != fpgaNum) mutex_lock(&ztex->root->mutex); libztex_selectFpga(ztex, fpgaNum); } } static void ztex_releaseFpga(struct libztex_device* ztex) { if (ztex->root->numberOfFpgas > 1) { ztex->root->selectedFpga = -1; mutex_unlock(&ztex->root->mutex); } } static struct cgpu_info *ztex_setup(struct libztex_device *dev, int fpgacount) { struct cgpu_info *ztex; char *fpganame = (char*)dev->snString; ztex = calloc(1, sizeof(struct cgpu_info)); ztex->drv = &ztex_drv; ztex->device_ztex = dev; ztex->procs = fpgacount; ztex->threads = fpgacount; ztex->dev_manufacturer = dev->dev_manufacturer; ztex->dev_product = dev->dev_product; ztex->dev_serial = (char*)&dev->snString[0]; ztex->name = fpganame; add_cgpu(ztex); strcpy(ztex->device_ztex->repr, ztex->dev_repr); applog(LOG_INFO, "%"PRIpreprv": Found Ztex (ZTEX %s)", ztex->dev_repr, fpganame); return ztex; } static bool ztex_lowl_match(const struct lowlevel_device_info * const info) { return lowlevel_match_lowlproduct(info, &lowl_usb, "btcminer for ZTEX"); } static bool ztex_lowl_probe(const struct lowlevel_device_info * const info) { const char * const product = info->product; const char * const serial = info->serial; if (info->lowl != &lowl_usb) { applog(LOG_DEBUG, "%s: Matched \"%s\" serial \"%s\", but lowlevel driver is not usb!", __func__, product, serial); return false; } libusb_device * const usbdev = info->lowl_data; const enum ztex_check_result err = libztex_checkDevice(usbdev); switch (err) { case CHECK_ERROR: applogr(false, LOG_ERR, "%s: Can not check device %s", ztex_drv.dname, info->devid); case CHECK_IS_NOT_ZTEX: return false; case CHECK_OK: break; case CHECK_RESCAN: bfg_need_detect_rescan = true; return false; } int fpgacount; struct libztex_device *ztex_master; struct cgpu_info *ztex; ztex_master = libztex_prepare_device2(usbdev); if (!ztex_master) applogr(false, LOG_ERR, "%s: libztex_prepare_device2 failed on %s", ztex_drv.dname, info->devid); if (bfg_claim_usb(&ztex_drv, true, ztex_master->usbbus, ztex_master->usbaddress)) return false; ztex_master->root = ztex_master; fpgacount = libztex_numberOfFpgas(ztex_master); ztex_master->handles = fpgacount; ztex = ztex_setup(ztex_master, fpgacount); if (fpgacount > 1) pthread_mutex_init(&ztex->device_ztex->mutex, NULL); return true; } static bool ztex_change_clock_func(struct thr_info *thr, int bestM) { struct cgpu_info *cgpu = thr->cgpu; struct 
libztex_device *ztex = thr->cgpu->device_ztex; ztex_selectFpga(ztex, cgpu->proc_id); libztex_setFreq(ztex, bestM, cgpu->proc_repr); ztex_releaseFpga(ztex); return true; } static bool ztex_updateFreq(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex = thr->cgpu->device_ztex; bool rv = dclk_updateFreq(&ztex->dclk, ztex_change_clock_func, thr); if (unlikely(!rv)) { ztex_selectFpga(ztex, cgpu->proc_id); libztex_resetFpga(ztex); ztex_releaseFpga(ztex); } return rv; } static bool ztex_checkNonce(struct cgpu_info *cgpu, struct work *work, struct libztex_hash_data *hdata) { uint32_t *data32 = (uint32_t *)(work->data); unsigned char swap[80]; uint32_t *swap32 = (uint32_t *)swap; unsigned char hash1[32]; unsigned char hash2[32]; uint32_t *hash2_32 = (uint32_t *)hash2; swap32[76/4] = htobe32(hdata->nonce); swap32yes(swap32, data32, 76 / 4); sha256(swap, 80, hash1); sha256(hash1, 32, hash2); if (be32toh(hash2_32[7]) != ((hdata->hash7 + 0x5be0cd19) & 0xFFFFFFFF)) { applog(LOG_DEBUG, "%"PRIpreprv": checkNonce failed for %08x", cgpu->proc_repr, hdata->nonce); return false; } return true; } static int64_t ztex_scanhash(struct thr_info *thr, struct work *work, __maybe_unused int64_t max_nonce) { struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex; unsigned char sendbuf[44]; int i, j, k; uint32_t *backlog; int backlog_p = 0, backlog_max; uint32_t *lastnonce; uint32_t nonce, noncecnt = 0; bool overflow, found; struct libztex_hash_data hdata[GOLDEN_BACKLOG]; if (thr->cgpu->deven == DEV_DISABLED) return -1; ztex = thr->cgpu->device_ztex; memcpy(sendbuf, work->data + 64, 12); memcpy(sendbuf + 12, work->midstate, 32); ztex_selectFpga(ztex, cgpu->proc_id); i = libztex_sendHashData(ztex, sendbuf); if (i < 0) { // Something wrong happened in send applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, retrying", cgpu->proc_repr, i); cgsleep_ms(500); i = libztex_sendHashData(ztex, sendbuf); if (i < 0) { // And there's nothing we can do about it ztex_disable(thr); applog(LOG_ERR, "%"PRIpreprv": Failed to send hash data with err %d, giving up", cgpu->proc_repr, i); ztex_releaseFpga(ztex); return -1; } } ztex_releaseFpga(ztex); applog(LOG_DEBUG, "%"PRIpreprv": sent hashdata", cgpu->proc_repr); lastnonce = calloc(1, sizeof(uint32_t)*ztex->numNonces); if (lastnonce == NULL) { applog(LOG_ERR, "%"PRIpreprv": failed to allocate lastnonce[%d]", cgpu->proc_repr, ztex->numNonces); return -1; } /* Add an extra slot for detecting dupes that lie around */ backlog_max = ztex->numNonces * (2 + ztex->extraSolutions); backlog = calloc(1, sizeof(uint32_t) * backlog_max); if (backlog == NULL) { applog(LOG_ERR, "%"PRIpreprv": failed to allocate backlog[%d]", cgpu->proc_repr, backlog_max); free(lastnonce); return -1; } overflow = false; int count = 0; applog(LOG_DEBUG, "%"PRIpreprv": entering poll loop", cgpu->proc_repr); while (!(overflow || thr->work_restart)) { count++; if (!restart_wait(thr, 250)) { applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr); break; } ztex_selectFpga(ztex, cgpu->proc_id); i = libztex_readHashData(ztex, &hdata[0]); if (i < 0) { // Something wrong happened in read applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, retrying", cgpu->proc_repr, i); cgsleep_ms(500); i = libztex_readHashData(ztex, &hdata[0]); if (i < 0) { // And there's nothing we can do about it ztex_disable(thr); applog(LOG_ERR, "%"PRIpreprv": Failed to read hash data with err %d, giving up", cgpu->proc_repr, i); free(lastnonce); free(backlog); 
ztex_releaseFpga(ztex); return -1; } } ztex_releaseFpga(ztex); if (thr->work_restart) { applog(LOG_DEBUG, "%"PRIpreprv": New work detected", cgpu->proc_repr); break; } dclk_gotNonces(&ztex->dclk); for (i = 0; i < ztex->numNonces; i++) { nonce = hdata[i].nonce; if (nonce > noncecnt) noncecnt = nonce; if (((0xffffffff - nonce) < (nonce - lastnonce[i])) || nonce < lastnonce[i]) { applog(LOG_DEBUG, "%"PRIpreprv": overflow nonce=%08x lastnonce=%08x", cgpu->proc_repr, nonce, lastnonce[i]); overflow = true; } else lastnonce[i] = nonce; if (!ztex_checkNonce(cgpu, work, &hdata[i])) { // do not count errors in the first 500ms after sendHashData (2x250 wait time) if (count > 2) dclk_errorCount(&ztex->dclk, 1.0 / ztex->numNonces); inc_hw_errors_only(thr); } for (j=0; j<=ztex->extraSolutions; j++) { nonce = hdata[i].goldenNonce[j]; if (nonce == ztex->offsNonces) { continue; } found = false; for (k = 0; k < backlog_max; k++) { if (backlog[k] == nonce) { found = true; break; } } if (!found) { backlog[backlog_p++] = nonce; if (backlog_p >= backlog_max) backlog_p = 0; work->blk.nonce = 0xffffffff; if (!j || test_nonce(work, nonce, false)) submit_nonce(thr, work, nonce); applog(LOG_DEBUG, "%"PRIpreprv": submitted %08x (from N%dE%d)", cgpu->proc_repr, nonce, i, j); } } } } dclk_preUpdate(&ztex->dclk); if (!ztex_updateFreq(thr)) { // Something really serious happened, so mark this thread as dead! free(lastnonce); free(backlog); return -1; } applog(LOG_DEBUG, "%"PRIpreprv": exit %1.8X", cgpu->proc_repr, noncecnt); work->blk.nonce = 0xffffffff; free(lastnonce); free(backlog); return noncecnt; } static struct api_data* get_ztex_drv_extra_device_status(struct cgpu_info *ztex) { struct api_data*root = NULL; struct libztex_device *ztexr = ztex->device_ztex; if (ztexr) { double frequency = ztexr->freqM1 * (ztexr->dclk.freqM + 1); root = api_add_freq(root, "Frequency", &frequency, true); } return root; } static bool ztex_prepare(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex = cgpu->device_ztex; { char *fpganame = malloc(LIBZTEX_SNSTRING_LEN+3+1); sprintf(fpganame, "%s-%u", ztex->snString, cgpu->proc_id+1); cgpu->name = fpganame; } ztex_selectFpga(ztex, cgpu->proc_id); if (libztex_configureFpga(ztex, cgpu->proc_repr) != 0) { libztex_resetFpga(ztex); ztex_releaseFpga(ztex); applog(LOG_ERR, "%"PRIpreprv": Disabling!", cgpu->proc_repr); thr->cgpu->deven = DEV_DISABLED; return true; } ztex->dclk.freqM = ztex->dclk.freqMaxM+1;; //ztex_updateFreq(thr); libztex_setFreq(ztex, ztex->dclk.freqMDefault, cgpu->proc_repr); ztex_releaseFpga(ztex); notifier_init(thr->work_restart_notifier); applog(LOG_DEBUG, "%"PRIpreprv": prepare", cgpu->proc_repr); cgpu->status = LIFE_INIT2; return true; } static void ztex_shutdown(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct libztex_device *ztex = cgpu->device_ztex; if (!ztex) return; cgpu->device_ztex = NULL; applog(LOG_DEBUG, "%"PRIpreprv": shutdown", cgpu->proc_repr); if (--ztex->handles) return; applog(LOG_DEBUG, "%s: No handles remaining, destroying libztex device", cgpu->dev_repr); if (ztex->root->numberOfFpgas > 1) pthread_mutex_destroy(&ztex->mutex); libztex_destroy_device(ztex); } static void ztex_disable(struct thr_info *thr) { applog(LOG_ERR, "%"PRIpreprv": Disabling!", thr->cgpu->proc_repr); thr->cgpu->deven = DEV_DISABLED; ztex_shutdown(thr); } struct device_drv ztex_drv = { .dname = "ztex", .name = "ZTX", .lowl_match = ztex_lowl_match, .lowl_probe = ztex_lowl_probe, .get_api_extra_device_status = 
get_ztex_drv_extra_device_status, .thread_init = ztex_prepare, .scanhash = ztex_scanhash, .thread_shutdown = ztex_shutdown, }; bfgminer-bfgminer-3.10.0/dynclock.c000066400000000000000000000077211226556647300171540ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 nelisky * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include "dynclock.h" #include "miner.h" void dclk_prepare(struct dclk_data *data) { *data = (struct dclk_data){ // after 275 sample periods .minGoodSamples = 150., .freqMinM = 1, }; } void dclk_msg_freqchange(const char *repr, int oldFreq, int newFreq, const char *tail) { applog(LOG_NOTICE, "%"PRIpreprv": Frequency %s from %u to %u MHz%s", repr, (oldFreq > newFreq ? "dropped" : "raised "), oldFreq, newFreq, tail ?: "" ); } bool dclk_updateFreq(struct dclk_data *data, dclk_change_clock_func_t changeclock, struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; uint8_t freqMDefault = data->freqMDefault; int i, maxM, bestM; double bestR, r; bool rv = true; if (freqMDefault > data->freqMaxM) // This occurs when the device in question adjusts its MaxM down due to temperature or similar reasons freqMDefault = data->freqMaxM; for (i = 0; i < data->freqMaxM; i++) if (data->maxErrorRate[i + 1] * i < data->maxErrorRate[i] * (i + 20)) data->maxErrorRate[i + 1] = data->maxErrorRate[i] * (1.0 + 20.0 / i); maxM = 0; // Use max mulitplier up to the default as far as possible without hitting the max error rate while (maxM < freqMDefault && data->maxErrorRate[maxM + 1] < DCLK_MAXMAXERRORRATE) maxM++; // Use max mulitplier beyond the default if it's never hit the max error rate, and our current max has collected sufficient samples while (maxM < data->freqMaxM && data->maxErrorRate[maxM + 1] < DCLK_MAXMAXERRORRATE && data->errorWeight[maxM] >= data->minGoodSamples) maxM++; // Find the multiplier that gives the best hashrate bestM = data->freqMinM; bestR = 0; for (i = bestM; i <= maxM; i++) { // Hashrate is weighed on a linear scale r = (i + 1); // The currently selected frequency gets a small "bonus" in comparison, as hysteresis if (i == data->freqM) r += DCLK_ERRORHYSTERESIS; // Adjust for measured error rate r *= (1 - data->maxErrorRate[i]); // If it beats the current best, update best* if (r > bestR) { bestM = i; bestR = r; } } // Actually change the clock if the best multiplier is not currently selected if (bestM != data->freqM) { rv = changeclock(thr, bestM); } // Find the highest multiplier that we've taken a reasonable sampling of maxM = freqMDefault; while (maxM < data->freqMaxM && data->errorWeight[maxM + 1] > 100) maxM++; // If the new multiplier is some fraction of the highest we've used long enough to get a good sample, assume there is something wrong and instruct the driver to shut it off if ((bestM < (1.0 - DCLK_OVERHEATTHRESHOLD) * maxM) && bestM < maxM - 1) { applog(LOG_ERR, "%"PRIpreprv": frequency drop of %.1f%% detect. This may be caused by overheating. 
FPGA is shut down to prevent damage.", cgpu->proc_repr, (1.0 - 1.0 * bestM / maxM) * 100); return false; } return rv; } void dclk_gotNonces(struct dclk_data *data) { data->errorCount[data->freqM] *= 0.995; data->errorWeight[data->freqM] = data->errorWeight[data->freqM] * 0.995 + 1.0; } void dclk_errorCount(struct dclk_data *data, double portion) { data->errorCount[data->freqM] += portion; } void dclk_preUpdate(struct dclk_data *data) { data->errorRate[data->freqM] = data->errorCount[data->freqM] / data->errorWeight[data->freqM]; // errorWeight 100 begins after sample period 137; before then, we minimize the effect of measured errorRate if (data->errorWeight[data->freqM] < 100) data->errorRate[data->freqM] /= 100; if (data->errorRate[data->freqM] > data->maxErrorRate[data->freqM]) data->maxErrorRate[data->freqM] = data->errorRate[data->freqM]; } bfgminer-bfgminer-3.10.0/dynclock.h000066400000000000000000000043271226556647300171600ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 nelisky * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef DYNCLOCK_H #define DYNCLOCK_H #include #include struct thr_info; #define DCLK_MAXMAXERRORRATE 0.05 #define DCLK_ERRORHYSTERESIS 0.1 #define DCLK_OVERHEATTHRESHOLD 0.4 struct dclk_data { // Current frequency multiplier uint8_t freqM; // Minimum frequency multiplier to consider (set by driver) uint8_t freqMinM; // Maximum frequency multiplier to consider (set by driver) uint8_t freqMaxM; // "Default" frequency multiplier to work with (set by driver) uint8_t freqMDefault; // Threshold before errorWeight is considered reasonably constant // NOTE: This is not a mere number of sampling periods (but related) uint8_t minGoodSamples; // Numerator of errorWeight after dclk_errorCount double errorCount[256]; // Approaches 200 double errorWeight[256]; // Error rate (0.0 - 1.0) as of end of last sampling period double errorRate[256]; // Highest error rate (0.0 - 1.0) encountered double maxErrorRate[256]; }; typedef bool (*dclk_change_clock_func_t)(struct thr_info *, int multiplier); // Standard applog message called by driver frequency-change functions extern void dclk_msg_freqchange(const char *, int oldFreq, int newFreq, const char *tail); // Called to initialize dclk_data at startup extern void dclk_prepare(struct dclk_data *data); // Called to start a sampling period extern void dclk_gotNonces(struct dclk_data *); // Called to increment the current sampling period's error rate (1.0 "portion" is 100% errors) extern void dclk_errorCount(struct dclk_data *, double portion); // Called after a sampling period is completed to update actual error rate extern void dclk_preUpdate(struct dclk_data *data); // Called after a sampling period is completed, and error rate updated, to make actual clock adjustments extern bool dclk_updateFreq(struct dclk_data *, dclk_change_clock_func_t changeclock, struct thr_info *); #endif bfgminer-bfgminer-3.10.0/example.conf000066400000000000000000000015141226556647300174760ustar00rootroot00000000000000{ "pools" : [ { "url" : "http://url1:8332", "user" : "user1", "pass" : "pass1" }, { "url" : "http://url2:8344", "pool-proxy" : "socks5://127.0.0.1:1080", "user" : "user2", "pass" : "pass2" }, { "url" : "http://url3:8332", "user" : "user3", "pass" : "pass3" } ], "intensity" : 
"d,9,9,9", "gpu-engine" : "0-985,0-950,0-960,0-1000", "gpu-fan" : "0-85,0-85,0-85,0-85", "gpu-memclock" : "860,825,835,875", "gpu-powertune" : "20,20,20,20", "temp-cutoff" : "95,95,95,95", "temp-overheat" : "85,85,85,85", "temp-target" : "75,75,75,75", "auto-fan" : true, "auto-gpu" : true, "expiry" : 120, "failover-only" : true, "gpu-threads" : 2, "log" : 5, "queue" : 1, "scan-time" : 60, "temp-hysteresis" : 3, "scan-serial" : [ "/dev/ttyUSB0", "/dev/ttyUSB1", "/dev/ttyUSB2", "/dev/ttyUSB3" ], "kernel-path" : "/usr/local/bin" } bfgminer-bfgminer-3.10.0/findnonce.c000066400000000000000000000140311226556647300173010ustar00rootroot00000000000000/* * Copyright 2011-2013 Con Kolivas * Copyright 2012-2013 Luke Dashjr * Copyright 2011 Nils Schneider * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #ifdef HAVE_OPENCL #include #include #include #include #include #include "findnonce.h" #include "miner.h" #include "scrypt.h" const uint32_t SHA256_K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; #define rotate(x,y) ((x<>(sizeof(x)*8-y))) #define rotr(x,y) ((x>>y) | (x<<(sizeof(x)*8-y))) #define R(a, b, c, d, e, f, g, h, w, k) \ h = h + (rotate(e, 26) ^ rotate(e, 21) ^ rotate(e, 7)) + (g ^ (e & (f ^ g))) + k + w; \ d = d + h; \ h = h + (rotate(a, 30) ^ rotate(a, 19) ^ rotate(a, 10)) + ((a & b) | (c & (a | b))) void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data) { cl_uint A, B, C, D, E, F, G, H; A = state[0]; B = state[1]; C = state[2]; D = state[3]; E = state[4]; F = state[5]; G = state[6]; H = state[7]; R(A, B, C, D, E, F, G, H, data[0], SHA256_K[0]); R(H, A, B, C, D, E, F, G, data[1], SHA256_K[1]); R(G, H, A, B, C, D, E, F, data[2], SHA256_K[2]); blk->cty_a = A; blk->cty_b = B; blk->cty_c = C; blk->cty_d = D; blk->D1A = D + 0xb956c25b; blk->cty_e = E; blk->cty_f = F; blk->cty_g = G; blk->cty_h = H; blk->ctx_a = state[0]; blk->ctx_b = state[1]; blk->ctx_c = state[2]; blk->ctx_d = state[3]; blk->ctx_e = state[4]; blk->ctx_f = state[5]; blk->ctx_g = state[6]; blk->ctx_h = state[7]; blk->merkle = data[0]; blk->ntime = data[1]; blk->nbits = data[2]; blk->W16 = blk->fW0 = data[0] + (rotr(data[1], 7) ^ rotr(data[1], 18) ^ (data[1] >> 3)); blk->W17 = blk->fW1 = data[1] + (rotr(data[2], 7) ^ rotr(data[2], 18) ^ (data[2] >> 3)) + 0x01100000; blk->PreVal4 = blk->fcty_e = blk->ctx_e + (rotr(B, 6) ^ rotr(B, 11) ^ rotr(B, 25)) + (D ^ (B & (C ^ D))) + 0xe9b5dba5; blk->T1 = blk->fcty_e2 = (rotr(F, 2) ^ rotr(F, 13) ^ rotr(F, 22)) + ((F & G) | (H & (F | G))); blk->PreVal4_2 = blk->PreVal4 + blk->T1; blk->PreVal0 = blk->PreVal4 + 
blk->ctx_a; blk->PreW31 = 0x00000280 + (rotr(blk->W16, 7) ^ rotr(blk->W16, 18) ^ (blk->W16 >> 3)); blk->PreW32 = blk->W16 + (rotr(blk->W17, 7) ^ rotr(blk->W17, 18) ^ (blk->W17 >> 3)); blk->PreW18 = data[2] + (rotr(blk->W16, 17) ^ rotr(blk->W16, 19) ^ (blk->W16 >> 10)); blk->PreW19 = 0x11002000 + (rotr(blk->W17, 17) ^ rotr(blk->W17, 19) ^ (blk->W17 >> 10)); blk->W2 = data[2]; blk->W2A = blk->W2 + (rotr(blk->W16, 19) ^ rotr(blk->W16, 17) ^ (blk->W16 >> 10)); blk->W17_2 = 0x11002000 + (rotr(blk->W17, 19) ^ rotr(blk->W17, 17) ^ (blk->W17 >> 10)); blk->fW2 = data[2] + (rotr(blk->fW0, 17) ^ rotr(blk->fW0, 19) ^ (blk->fW0 >> 10)); blk->fW3 = 0x11002000 + (rotr(blk->fW1, 17) ^ rotr(blk->fW1, 19) ^ (blk->fW1 >> 10)); blk->fW15 = 0x00000280 + (rotr(blk->fW0, 7) ^ rotr(blk->fW0, 18) ^ (blk->fW0 >> 3)); blk->fW01r = blk->fW0 + (rotr(blk->fW1, 7) ^ rotr(blk->fW1, 18) ^ (blk->fW1 >> 3)); blk->PreVal4addT1 = blk->PreVal4 + blk->T1; blk->T1substate0 = blk->ctx_a - blk->T1; blk->C1addK5 = blk->cty_c + SHA256_K[5]; blk->B1addK6 = blk->cty_b + SHA256_K[6]; blk->PreVal0addK7 = blk->PreVal0 + SHA256_K[7]; blk->W16addK16 = blk->W16 + SHA256_K[16]; blk->W17addK17 = blk->W17 + SHA256_K[17]; blk->zeroA = blk->ctx_a + 0x98c7e2a2; blk->zeroB = blk->ctx_a + 0xfc08884d; blk->oneA = blk->ctx_b + 0x90bb1e3c; blk->twoA = blk->ctx_c + 0x50c6645b; blk->threeA = blk->ctx_d + 0x3ac42e24; blk->fourA = blk->ctx_e + SHA256_K[4]; blk->fiveA = blk->ctx_f + SHA256_K[5]; blk->sixA = blk->ctx_g + SHA256_K[6]; blk->sevenA = blk->ctx_h + SHA256_K[7]; } struct pc_data { struct thr_info *thr; struct work work; uint32_t res[SCRYPT_MAXBUFFERS]; pthread_t pth; int found; }; static void *postcalc_hash(void *userdata) { struct pc_data *pcd = (struct pc_data *)userdata; struct thr_info *thr = pcd->thr; unsigned int entry = 0; int found = opt_scrypt ? SCRYPT_FOUND : FOUND; pthread_detach(pthread_self()); RenameThread("postcalchsh"); /* To prevent corrupt values in FOUND from trying to read beyond the * end of the res[] array */ if (unlikely(pcd->res[found] & ~found)) { applog(LOG_WARNING, "%"PRIpreprv": invalid nonce count - HW error", thr->cgpu->proc_repr); inc_hw_errors_only(thr); pcd->res[found] &= found; } for (entry = 0; entry < pcd->res[found]; entry++) { uint32_t nonce = pcd->res[entry]; applog(LOG_DEBUG, "OCL NONCE %u found in slot %d", nonce, entry); submit_nonce(thr, &pcd->work, nonce); } clean_work(&pcd->work); free(pcd); return NULL; } void postcalc_hash_async(struct thr_info *thr, struct work *work, uint32_t *res) { struct pc_data *pcd = malloc(sizeof(struct pc_data)); int buffersize; if (unlikely(!pcd)) { applog(LOG_ERR, "Failed to malloc pc_data in postcalc_hash_async"); return; } *pcd = (struct pc_data){ .thr = thr, }; __copy_work(&pcd->work, work); buffersize = opt_scrypt ? 
SCRYPT_BUFFERSIZE : BUFFERSIZE; memcpy(&pcd->res, res, buffersize); if (pthread_create(&pcd->pth, NULL, postcalc_hash, (void *)pcd)) { applog(LOG_ERR, "Failed to create postcalc_hash thread"); return; } } #endif /* HAVE_OPENCL */ bfgminer-bfgminer-3.10.0/findnonce.h000066400000000000000000000011411226556647300173040ustar00rootroot00000000000000#ifndef __FINDNONCE_H__ #define __FINDNONCE_H__ #include #include "miner.h" #include "config.h" #define MAXTHREADS (0xFFFFFFFEULL) #define MAXBUFFERS (0x10) #define BUFFERSIZE (sizeof(uint32_t) * MAXBUFFERS) #define FOUND (0x0F) #define SCRYPT_MAXBUFFERS (0x100) #define SCRYPT_BUFFERSIZE (sizeof(uint32_t) * SCRYPT_MAXBUFFERS) #define SCRYPT_FOUND (0xFF) #ifdef HAVE_OPENCL extern void precalc_hash(dev_blk_ctx *blk, uint32_t *state, uint32_t *data); extern void postcalc_hash_async(struct thr_info *thr, struct work *work, uint32_t *res); #endif /* HAVE_OPENCL */ #endif /*__FINDNONCE_H__*/ bfgminer-bfgminer-3.10.0/ft232r.c000066400000000000000000000216451226556647300163710ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include "compat.h" #include "ft232r.h" #include "logging.h" #include "lowlevel.h" #include "miner.h" #define FT232R_IDVENDOR 0x0403 #define FT232R_IDPRODUCT 0x6001 static void ft232r_devinfo_free(struct lowlevel_device_info * const info) { libusb_device * const dev = info->lowl_data; if (dev) libusb_unref_device(dev); } static bool _ft232r_devinfo_scan_cb(struct lowlevel_device_info * const usbinfo, void * const userp) { struct lowlevel_device_info **devinfo_list_p = userp, *info; info = malloc(sizeof(*info)); *info = (struct lowlevel_device_info){ .lowl = &lowl_ft232r, .lowl_data = libusb_ref_device(usbinfo->lowl_data), }; lowlevel_devinfo_semicpy(info, usbinfo); LL_PREPEND(*devinfo_list_p, info); // Never *consume* the lowl_usb entry - especially since this is during the scan! 
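/* Returning false here leaves the shared lowl_usb entry unconsumed during the scan; the private copy built above, which holds its own libusb_ref_device reference, is what reaches the caller through devinfo_list_p. */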
return false; } static struct lowlevel_device_info *ft232r_devinfo_scan() { struct lowlevel_device_info *devinfo_list = NULL; lowlevel_detect_id(_ft232r_devinfo_scan_cb, &devinfo_list, &lowl_usb, FT232R_IDVENDOR, FT232R_IDPRODUCT); return devinfo_list; } #define FTDI_REQTYPE (LIBUSB_REQUEST_TYPE_VENDOR | LIBUSB_RECIPIENT_DEVICE) #define FTDI_REQTYPE_IN (FTDI_REQTYPE | LIBUSB_ENDPOINT_IN) #define FTDI_REQTYPE_OUT (FTDI_REQTYPE | LIBUSB_ENDPOINT_OUT) #define FTDI_REQUEST_RESET 0 #define FTDI_REQUEST_SET_BAUDRATE 3 #define FTDI_REQUEST_SET_BITMODE 0x0b #define FTDI_REQUEST_GET_PINS 0x0c #define FTDI_REQUEST_GET_BITMODE 0x0c #define FTDI_BAUDRATE_3M 0,0 #define FTDI_INDEX 1 #define FTDI_TIMEOUT 1000 struct ft232r_device_handle { libusb_device_handle *h; uint8_t i; uint8_t o; unsigned char ibuf[256]; int ibufLen; uint16_t osz; unsigned char *obuf; uint16_t obufsz; }; struct ft232r_device_handle *ft232r_open(struct lowlevel_device_info *info) { libusb_device * const dev = info->lowl_data; info->lowl_data = NULL; if (!dev) return NULL; // FIXME: Cleanup on errors libusb_device_handle *devh; struct ft232r_device_handle *ftdi; if (libusb_open(dev, &devh)) { applog(LOG_ERR, "ft232r_open: Error opening device"); return NULL; } libusb_reset_device(devh); libusb_detach_kernel_driver(devh, 0); if (libusb_set_configuration(devh, 1)) { applog(LOG_ERR, "ft232r_open: Error setting configuration"); return NULL; } if (libusb_claim_interface(devh, 0)) { applog(LOG_ERR, "ft232r_open: Error claiming interface"); return NULL; } if (libusb_control_transfer(devh, FTDI_REQTYPE_OUT, FTDI_REQUEST_SET_BAUDRATE, FTDI_BAUDRATE_3M, NULL, 0, FTDI_TIMEOUT) < 0) { applog(LOG_ERR, "ft232r_open: Error performing control transfer"); return NULL; } struct libusb_config_descriptor *cfg; if (libusb_get_config_descriptor(dev, 0, &cfg)) { applog(LOG_ERR, "ft232r_open: Error getting config descriptor"); return NULL; } const struct libusb_interface_descriptor *altcfg = &cfg->interface[0].altsetting[0]; if (altcfg->bNumEndpoints < 2) { applog(LOG_ERR, "ft232r_open: Too few endpoints"); return NULL; } ftdi = calloc(1, sizeof(*ftdi)); ftdi->h = devh; ftdi->i = altcfg->endpoint[0].bEndpointAddress; ftdi->o = altcfg->endpoint[1].bEndpointAddress; ftdi->osz = 0x1000; ftdi->obuf = malloc(ftdi->osz); libusb_free_config_descriptor(cfg); return ftdi; } void ft232r_close(struct ft232r_device_handle *dev) { libusb_release_interface(dev->h, 0); libusb_reset_device(dev->h); libusb_close(dev->h); } bool ft232r_purge_buffers(struct ft232r_device_handle *dev, enum ft232r_reset_purge purge) { if (ft232r_flush(dev) < 0) return false; if (purge & FTDI_PURGE_RX) { if (libusb_control_transfer(dev->h, FTDI_REQTYPE_OUT, FTDI_REQUEST_RESET, FTDI_PURGE_RX, FTDI_INDEX, NULL, 0, FTDI_TIMEOUT)) return false; dev->ibufLen = 0; } if (purge & FTDI_PURGE_TX) if (libusb_control_transfer(dev->h, FTDI_REQTYPE_OUT, FTDI_REQUEST_RESET, FTDI_PURGE_TX, FTDI_INDEX, NULL, 0, FTDI_TIMEOUT)) return false; return true; } bool ft232r_set_bitmode(struct ft232r_device_handle *dev, uint8_t mask, uint8_t mode) { if (ft232r_flush(dev) < 0) return false; if (libusb_control_transfer(dev->h, FTDI_REQTYPE_OUT, FTDI_REQUEST_SET_BITMODE, mask, FTDI_INDEX, NULL, 0, FTDI_TIMEOUT)) return false; return !libusb_control_transfer(dev->h, FTDI_REQTYPE_OUT, FTDI_REQUEST_SET_BITMODE, (mode << 8) | mask, FTDI_INDEX, NULL, 0, FTDI_TIMEOUT); } static ssize_t ft232r_readwrite(struct ft232r_device_handle *dev, unsigned char endpoint, void *data, size_t count) { int transferred; switch 
(libusb_bulk_transfer(dev->h, endpoint, data, count, &transferred, FTDI_TIMEOUT)) { case LIBUSB_ERROR_TIMEOUT: if (!transferred) { errno = ETIMEDOUT; return -1; } case 0: return transferred; default: errno = EIO; return -1; } } ssize_t ft232r_flush(struct ft232r_device_handle *dev) { if (!dev->obufsz) return 0; ssize_t r = ft232r_readwrite(dev, dev->o, dev->obuf, dev->obufsz); if (r == dev->obufsz) { dev->obufsz = 0; } else if (r > 0) { dev->obufsz -= r; memmove(dev->obuf, &dev->obuf[r], dev->obufsz); } return r; } ssize_t ft232r_write(struct ft232r_device_handle *dev, void *data, size_t count) { uint16_t bufleft; ssize_t r; bufleft = dev->osz - dev->obufsz; if (count < bufleft) { // Just add to output buffer memcpy(&dev->obuf[dev->obufsz], data, count); dev->obufsz += count; return count; } // Fill up buffer and flush memcpy(&dev->obuf[dev->obufsz], data, bufleft); dev->obufsz += bufleft; r = ft232r_flush(dev); if (unlikely(r <= 0)) { // In this case, no bytes were written supposedly, so remove this data from buffer dev->obufsz -= bufleft; return r; } // Even if not all bytes from this write got out, the remaining are still buffered return bufleft; } typedef ssize_t (*ft232r_rwfunc_t)(struct ft232r_device_handle *, void*, size_t); static ssize_t ft232r_rw_all(ft232r_rwfunc_t rwfunc, struct ft232r_device_handle *dev, void *data, size_t count) { char *p = data; ssize_t writ = 0, total = 0; while (count && (writ = rwfunc(dev, p, count)) > 0) { p += writ; count -= writ; total += writ; } return total ?: writ; } ssize_t ft232r_write_all(struct ft232r_device_handle *dev, void *data, size_t count) { return ft232r_rw_all(ft232r_write, dev, data, count); } ssize_t ft232r_read(struct ft232r_device_handle *dev, void *data, size_t count) { ssize_t r; int adj; // Flush any pending output before reading r = ft232r_flush(dev); if (r < 0) return r; // First 2 bytes of every 0x40 are FTDI status or something while (dev->ibufLen <= 2) { // TODO: Implement a timeout for status byte repeating int transferred = ft232r_readwrite(dev, dev->i, dev->ibuf, sizeof(dev->ibuf)); if (transferred <= 0) return transferred; dev->ibufLen = transferred; for (adj = 0x40; dev->ibufLen > adj; adj += 0x40 - 2) { dev->ibufLen -= 2; memmove(&dev->ibuf[adj], &dev->ibuf[adj+2], dev->ibufLen - adj); } } unsigned char *ibufs = &dev->ibuf[2]; size_t ibufsLen = dev->ibufLen - 2; if (count > ibufsLen) count = ibufsLen; memcpy(data, ibufs, count); dev->ibufLen -= count; ibufsLen -= count; if (ibufsLen) { memmove(ibufs, &ibufs[count], ibufsLen); applog(LOG_DEBUG, "ft232r_read: %"PRIu64" bytes extra", (uint64_t)ibufsLen); } return count; } ssize_t ft232r_read_all(struct ft232r_device_handle *dev, void *data, size_t count) { return ft232r_rw_all(ft232r_read, dev, data, count); } bool ft232r_get_pins(struct ft232r_device_handle *dev, uint8_t *pins) { return libusb_control_transfer(dev->h, FTDI_REQTYPE_IN, FTDI_REQUEST_GET_PINS, 0, FTDI_INDEX, pins, 1, FTDI_TIMEOUT) == 1; } bool ft232r_get_bitmode(struct ft232r_device_handle *dev, uint8_t *out_mode) { return libusb_control_transfer(dev->h, FTDI_REQTYPE_IN, FTDI_REQUEST_GET_BITMODE, 0, FTDI_INDEX, out_mode, 1, FTDI_TIMEOUT) == 1; } bool ft232r_set_cbus_bits(struct ft232r_device_handle *dev, bool sc, bool cs) { uint8_t pin_state = (cs ? (1<<2) : 0) | (sc ? 
(1<<3) : 0); return ft232r_set_bitmode(dev, 0xc0 | pin_state, 0x20); } bool ft232r_get_cbus_bits(struct ft232r_device_handle *dev, bool *out_sio0, bool *out_sio1) { uint8_t data; if (!ft232r_get_bitmode(dev, &data)) return false; *out_sio0 = data & 1; *out_sio1 = data & 2; return true; } struct lowlevel_driver lowl_ft232r = { .dname = "ft232r", .devinfo_scan = ft232r_devinfo_scan, .devinfo_free = ft232r_devinfo_free, }; #if 0 int main() { libusb_init(NULL); ft232r_scan(); ft232r_scan_free(); libusb_exit(NULL); } void applog(int prio, const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintf(fmt, ap); puts(""); va_end(ap); } #endif bfgminer-bfgminer-3.10.0/ft232r.h000066400000000000000000000030611226556647300163660ustar00rootroot00000000000000/* * Copyright 2012 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef BFGMINER_FT232R_H #define BFGMINER_FT232R_H #include #include #include #include "lowlevel.h" enum ft232r_reset_purge { FTDI_PURGE_RX = 1, FTDI_PURGE_TX = 2, FTDI_PURGE_BOTH = 3, }; struct ft232r_device_handle; extern struct ft232r_device_handle *ft232r_open(struct lowlevel_device_info *); extern void ft232r_close(struct ft232r_device_handle *); extern bool ft232r_purge_buffers(struct ft232r_device_handle *, enum ft232r_reset_purge); extern bool ft232r_set_bitmode(struct ft232r_device_handle *, uint8_t mask, uint8_t mode); extern ssize_t ft232r_flush(struct ft232r_device_handle *); extern ssize_t ft232r_write(struct ft232r_device_handle *, void *data, size_t count); extern ssize_t ft232r_write_all(struct ft232r_device_handle *, void *data, size_t count); extern ssize_t ft232r_read(struct ft232r_device_handle *, void *buf, size_t count); extern ssize_t ft232r_read_all(struct ft232r_device_handle *, void *data, size_t count); extern bool ft232r_get_pins(struct ft232r_device_handle *, uint8_t *pins); extern bool ft232r_set_cbus_bits(struct ft232r_device_handle *dev, bool sc, bool cs); extern bool ft232r_get_cbus_bits(struct ft232r_device_handle *dev, bool *out_sio0, bool *out_sio1); #endif bfgminer-bfgminer-3.10.0/hexdump.c000066400000000000000000000040721226556647300170140ustar00rootroot00000000000000/* * hexdump implementation without depenecies to *printf() * output is equal to 'hexdump -C' * should be compatible to 64bit architectures * * Copyright (c) 2009 Daniel Mack * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
*/ #include "config.h" #include "logging.h" #define hex_print(p) applog(LOG_DEBUG, "%s", p) static char nibble[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; #define BYTES_PER_LINE 0x10 void hexdump(const void *vp, unsigned int len) { const unsigned char *p = vp; unsigned int i, addr; unsigned int wordlen = sizeof(void*); unsigned char v, line[BYTES_PER_LINE * 5]; for (addr = 0; addr < len; addr += BYTES_PER_LINE) { /* clear line */ for (i = 0; i < sizeof(line); i++) { if (i == wordlen * 2 + 52 || i == wordlen * 2 + 69) { line[i] = '|'; continue; } if (i == wordlen * 2 + 70) { line[i] = '\0'; continue; } line[i] = ' '; } /* print address */ for (i = 0; i < wordlen * 2; i++) { v = addr >> ((wordlen * 2 - i - 1) * 4); line[i] = nibble[v & 0xf]; } /* dump content */ for (i = 0; i < BYTES_PER_LINE; i++) { int pos = (wordlen * 2) + 3 + (i / 8); if (addr + i >= len) break; v = p[addr + i]; line[pos + (i * 3) + 0] = nibble[v >> 4]; line[pos + (i * 3) + 1] = nibble[v & 0xf]; /* character printable? */ line[(wordlen * 2) + 53 + i] = (v >= ' ' && v <= '~') ? v : '.'; } hex_print(line); } } bfgminer-bfgminer-3.10.0/httpsrv.c000066400000000000000000000050001226556647300170440ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #ifdef WIN32 #include #endif #include #ifndef WIN32 #include #include #include #endif #include #include "logging.h" #include "util.h" static struct MHD_Daemon *httpsrv; extern int handle_getwork(struct MHD_Connection *, bytes_t *); void httpsrv_prepare_resp(struct MHD_Response *resp) { MHD_add_response_header(resp, MHD_HTTP_HEADER_SERVER, PACKAGE"/"VERSION" getwork server"); } static int httpsrv_handle_req(struct MHD_Connection *conn, const char *url, const char *method, bytes_t *upbuf) { return handle_getwork(conn, upbuf); } static int httpsrv_handle_access(void *cls, struct MHD_Connection *conn, const char *url, const char *method, const char *version, const char *upload_data, size_t *upload_data_size, void **con_cls) { bytes_t *upbuf; if (!*con_cls) { *con_cls = upbuf = malloc(sizeof(bytes_t)); bytes_init(upbuf); return MHD_YES; } upbuf = *con_cls; if (*upload_data_size) { bytes_append(upbuf, upload_data, *upload_data_size); *upload_data_size = 0; return MHD_YES; } return httpsrv_handle_req(conn, url, method, *con_cls); } static void httpsrv_cleanup_request(void *cls, struct MHD_Connection *conn, void **con_cls, enum MHD_RequestTerminationCode toe) { if (*con_cls) { bytes_t *upbuf = *con_cls; bytes_free(upbuf); free(upbuf); *con_cls = NULL; } } static void httpsrv_log(void *arg, const char *fmt, va_list ap) { if (!opt_debug) return; char tmp42[LOGBUFSIZ] = "HTTPSrv: "; vsnprintf(&tmp42[9], sizeof(tmp42)-9, fmt, ap); _applog(LOG_DEBUG, tmp42); } void httpsrv_start(unsigned short port) { httpsrv = MHD_start_daemon( MHD_USE_SELECT_INTERNALLY | MHD_USE_DEBUG, port, NULL, NULL, &httpsrv_handle_access, NULL, MHD_OPTION_NOTIFY_COMPLETED, &httpsrv_cleanup_request, NULL, MHD_OPTION_EXTERNAL_LOGGER, &httpsrv_log, NULL, MHD_OPTION_END); if (httpsrv) applog(LOG_NOTICE, "HTTP server listening on port %d", (int)port); else applog(LOG_ERR, "Failed to start HTTP server on port %d", (int)port); } void httpsrv_stop() { if (!httpsrv) return; 
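/* The NULL check above makes httpsrv_stop() a no-op when the HTTP server was never started or has already been stopped (httpsrv is reset to NULL below). */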
applog(LOG_DEBUG, "Stopping HTTP server"); MHD_stop_daemon(httpsrv); httpsrv = NULL; } bfgminer-bfgminer-3.10.0/httpsrv.h000066400000000000000000000003251226556647300170560ustar00rootroot00000000000000#ifndef _BFG_HTTPSRV_H #define _BFG_HTTPSRV_H #include extern void httpsrv_start(unsigned short port); extern void httpsrv_prepare_resp(struct MHD_Response *); extern void httpsrv_stop(); #endif bfgminer-bfgminer-3.10.0/icarus-common.h000066400000000000000000000055411226556647300201250ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2012 Xiangfu * Copyright 2012 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef ICARUS_COMMON_H #define ICARUS_COMMON_H #include #include #include #include "dynclock.h" #include "miner.h" // Fraction of a second, USB timeout is measured in // i.e. 10 means 1/10 of a second // Right now, it MUST be 10 due to other assumptions. #define TIME_FACTOR 10 // It's 10 per second, thus value = 10/TIME_FACTOR = #define ICARUS_READ_FAULT_DECISECONDS 1 #define NANOSEC 1000000000.0 // Default value for ICARUS_INFO->read_size #define ICARUS_DEFAULT_READ_SIZE 4 #define ICA_GETS_ERROR -1 #define ICA_GETS_OK 0 #define ICA_GETS_RESTART 1 #define ICA_GETS_TIMEOUT 2 // Store the last INFO_HISTORY data sets // [0] = current data, not yet ready to be included as an estimate // Each new data set throws the last old set off the end thus // keeping a ongoing average of recent data #define INFO_HISTORY 10 extern struct device_drv icarus_drv; struct ICARUS_HISTORY { struct timeval finish; double sumXiTi; double sumXi; double sumTi; double sumXi2; uint32_t values; uint32_t hash_count_min; uint32_t hash_count_max; }; enum timing_mode { MODE_DEFAULT, MODE_SHORT, MODE_LONG, MODE_VALUE }; struct ICARUS_INFO { // time to calculate the golden_ob struct timeval golden_tv; struct ICARUS_HISTORY history[INFO_HISTORY+1]; uint32_t min_data_count; // seconds per Hash double Hs; int read_count; // ds limit for (short=/long=) read_count int read_count_limit; enum timing_mode timing_mode; bool do_icarus_timing; int do_default_detection; double fullnonce; int count; double W; uint32_t values; uint64_t hash_count_range; // Determine the cost of history processing // (which will only affect W) uint64_t history_count; struct timeval history_time; // icarus-options int baud; int work_division; int fpga_count; uint32_t nonce_mask; int quirk_reopen; uint8_t user_set; bool continue_search; dclk_change_clock_func_t dclk_change_clock_func; struct dclk_data dclk; // Bytes to read from Icarus for nonce int read_size; }; struct icarus_state { bool firstrun; struct timeval tv_workstart; struct timeval tv_workfinish; struct work *last_work; struct work *last2_work; bool changework; bool identify; uint8_t ob_bin[64]; }; bool icarus_detect_custom(const char *devpath, struct device_drv *, struct ICARUS_INFO *); extern int icarus_gets(unsigned char *, int fd, struct timeval *tv_finish, struct thr_info *, int read_count, int read_size); extern int icarus_write(int fd, const void *buf, size_t bufLen); #endif bfgminer-bfgminer-3.10.0/iospeeds.h000066400000000000000000000004301226556647300171540ustar00rootroot00000000000000#include IOSPEED(57600) IOSPEED(115200) IOSPEED(230400) IOSPEED(460800) IOSPEED(921600) IOSPEED(576000) IOSPEED(1152000) 
IOSPEED(1500000) IOSPEED(3000000) IOSPEED(500000) IOSPEED(1000000) IOSPEED(2000000) IOSPEED(4000000) IOSPEED(2500000) IOSPEED(3500000) bfgminer-bfgminer-3.10.0/iospeeds_posix.h000066400000000000000000000003261226556647300204020ustar00rootroot00000000000000IOSPEED(0) IOSPEED(50) IOSPEED(110) IOSPEED(134) IOSPEED(200) IOSPEED(75) IOSPEED(150) IOSPEED(300) IOSPEED(600) IOSPEED(1200) IOSPEED(1800) IOSPEED(2400) IOSPEED(4800) IOSPEED(9600) IOSPEED(19200) IOSPEED(38400) bfgminer-bfgminer-3.10.0/jtag.c000066400000000000000000000162161226556647300162720ustar00rootroot00000000000000/* * Copyright 2012 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ // NOTE: This code is based on code Luke-Jr wrote originally for LPC1343CodeBase #include "config.h" #include #include #include #include #include "ft232r.h" #include "jtag.h" #include "logging.h" #include "miner.h" //#define DEBUG_JTAG_CLOCK #define FTDI_READ_BUFFER_SIZE 100 static unsigned char jtag_clock_byte(struct jtag_port *jp, bool tms, bool tdi) { return (jp->a->state & jp->ignored) | (tms ? jp->tms : 0) | (tdi ? jp->tdi : 0); } // NOTE: The order of tms and tdi here are inverted from LPC1343CodeBase bool jtag_clock(struct jtag_port *jp, bool tms, bool tdi, bool *tdo) { unsigned char bufsz = tdo ? 3 : 2; unsigned char buf[3]; memset(buf, jtag_clock_byte(jp, tms, tdi), sizeof(buf)); buf[2] = buf[1] |= jp->tck; if (ft232r_write_all(jp->a->ftdi, buf, bufsz) != bufsz) return false; jp->a->state = buf[2]; if (jp->a->async) { if (unlikely(tdo)) applog(LOG_WARNING, "jtag_clock: request for tdo in async mode not possible"); #ifdef DEBUG_JTAG_CLOCK applog(LOG_DEBUG, "%p %02x tms=%d tdi=%d tdo=?async", jp, (unsigned)buf[2], (int)tms, (int)tdi); #endif return true; } jp->a->bufread += bufsz; if (jp->a->bufread < FTDI_READ_BUFFER_SIZE - sizeof(buf) && !tdo) { // By deferring unnecessary reads, we can avoid some USB latency #ifdef DEBUG_JTAG_CLOCK applog(LOG_DEBUG, "%p %02x tms=%d tdi=%d tdo=?defer", jp, (unsigned)buf[2], (int)tms, (int)tdi); #endif return true; } #if 0 /* untested */ else if (!tdo) { if (ft232r_purge_buffers(jp->a->ftdi, FTDI_PURGE_BOTH)) { jp->bufread = 0; #ifdef DEBUG_JTAG_CLOCK applog(LOG_DEBUG, "%p %02x tms=%d tdi=%d tdo=?purge", jp, (unsigned)buf[2], (int)tms, (int)tdi); #endif return true; } } #endif uint8_t rbufsz = jp->a->bufread; jp->a->bufread = 0; unsigned char rbuf[rbufsz]; if (ft232r_read_all(jp->a->ftdi, rbuf, rbufsz) != rbufsz) return false; if (tdo) { *tdo = (rbuf[rbufsz-1] & jp->tdo); #ifdef DEBUG_JTAG_CLOCK char x[(rbufsz * 2) + 1]; bin2hex(x, rbuf, rbufsz); applog(LOG_DEBUG, "%p %02x tms=%d tdi=%d tdo=%d (%u:%s)", jp, (unsigned)rbuf[rbufsz-1], (int)tms, (int)tdi, (int)(bool)(rbuf[rbufsz-1] & jp->tdo), (unsigned)rbufsz, x); } else { applog(LOG_DEBUG, "%p %02x tms=%d tdi=%d tdo=?ignore", jp, (unsigned)buf[2], (int)tms, (int)tdi); #endif } return true; } static bool jtag_rw_bit(struct jtag_port *jp, void *buf, uint8_t mask, bool tms, bool do_read) { uint8_t *byte = buf; bool tdo; if (!jtag_clock(jp, tms, byte[0] & mask, do_read ? 
&tdo : NULL)) return false; if (do_read) { if (tdo) byte[0] |= mask; else byte[0] &= ~mask; } return true; } static inline bool getbit(void *data, uint32_t bitnum) { unsigned char *cdata = data; div_t d = div(bitnum, 8); unsigned char b = cdata[d.quot]; return b & (1<<(7 - d.rem)); } static inline void setbit(void *data, uint32_t bitnum, bool nv) { unsigned char *cdata = data; div_t d = div(bitnum, 8); unsigned char *p = &cdata[d.quot]; unsigned char o = (1<<(7 - d.rem)); if (nv) *p |= o; else *p &= ~o; } // Expects to start at the Capture step, to handle 0-length gracefully bool _jtag_llrw(struct jtag_port *jp, void *buf, size_t bitlength, bool do_read, int stage) { uint8_t *data = buf; if (!bitlength) return jtag_clock(jp, true, false, NULL); if (stage & 1) if (!jtag_clock(jp, false, false, NULL)) return false; #ifndef DEBUG_JTAG_CLOCK // This alternate implementation is designed to minimize ft232r reads (which are slow) if (do_read) { unsigned char rbuf[FTDI_READ_BUFFER_SIZE]; unsigned char wbuf[3]; ssize_t rbufsz, bitspending = 0; size_t databitoff = 0, i; --bitlength; for (i = 0; i < bitlength; ++i) { wbuf[0] = jtag_clock_byte(jp, false, getbit(data, i)); wbuf[1] = wbuf[0] | jp->tck; if (ft232r_write_all(jp->a->ftdi, wbuf, 2) != 2) return false; jp->a->bufread += 2; ++bitspending; if (jp->a->bufread > FTDI_READ_BUFFER_SIZE - 2) { // The next bit would overflow, so read now rbufsz = jp->a->bufread; if (ft232r_read_all(jp->a->ftdi, rbuf, rbufsz) != rbufsz) return false; for (ssize_t j = rbufsz - ((bitspending - 1) * 2); j < rbufsz; j += 2) setbit(data, databitoff++, (rbuf[j] & jp->tdo)); bitspending = 1; jp->a->bufread = 0; } } // Last bit needs special treatment wbuf[0] = jtag_clock_byte(jp, (stage & 2), getbit(data, i)); wbuf[2] = wbuf[1] = wbuf[0] | jp->tck; if (ft232r_write_all(jp->a->ftdi, wbuf, sizeof(wbuf)) != sizeof(wbuf)) return false; rbufsz = jp->a->bufread + 3; if (ft232r_read_all(jp->a->ftdi, rbuf, rbufsz) != rbufsz) return false; --rbufsz; for (ssize_t j = rbufsz - (bitspending * 2); j < rbufsz; j += 2) setbit(data, databitoff++, (rbuf[j] & jp->tdo)); setbit(data, databitoff++, (rbuf[rbufsz] & jp->tdo)); jp->a->bufread = 0; if (stage & 2) { if (!jtag_clock(jp, true, false, NULL)) // Update return false; } return true; } #endif int i, j; div_t d; d = div(bitlength - 1, 8); for (i = 0; i < d.quot; ++i) { for (j = 0x80; j; j /= 2) { if (!jtag_rw_bit(jp, &data[i], j, false, do_read)) return false; } } for (j = 0; j < d.rem; ++j) if (!jtag_rw_bit(jp, &data[i], 0x80 >> j, false, do_read)) return false; if (stage & 2) { if (!jtag_rw_bit(jp, &data[i], 0x80 >> j, true, do_read)) return false; if (!jtag_clock(jp, true, false, NULL)) // Update return false; } else if (!jtag_rw_bit(jp, &data[i], 0x80 >> j, false, do_read)) return false; return true; } bool jtag_reset(struct jtag_port *jp) { for (int i = 0; i < 5; ++i) if (!jtag_clock(jp, true, false, NULL)) return false; return jtag_clock(jp, false, false, NULL); } // Returns -1 for failure, -2 for unknown, or zero and higher for number of devices ssize_t jtag_detect(struct jtag_port *jp) { // TODO: detect more than 1 device int i; bool tdo; if (!(1 && jtag_write(jp, JTAG_REG_IR, "\xff", 8) && jtag_clock(jp, true , false, NULL) // Select DR && jtag_clock(jp, false, false, NULL) // Capture DR && jtag_clock(jp, false, false, NULL) // Shift DR )) return -1; for (i = 0; i < 4; ++i) if (!jtag_clock(jp, false, false, NULL)) return -1; if (!jtag_clock(jp, false, false, &tdo)) return -1; if (tdo) return -1; for (i = 0; i < 4; ++i) { if 
(!jtag_clock(jp, false, true, &tdo)) return -1; if (tdo) break; } if (!jtag_reset(jp)) return -1; return i < 2 ? i : -2; } bool _jtag_rw(struct jtag_port *jp, enum jtagreg r, void *buf, size_t bitlength, bool do_read, int stage) { if (!jtag_clock(jp, true, false, NULL)) // Select DR return false; if (r == JTAG_REG_IR) if (!jtag_clock(jp, true, false, NULL)) // Select IR return false; if (!jtag_clock(jp, false, false, NULL)) // Capture return false; return _jtag_llrw(jp, buf, bitlength, do_read, stage); // Exit1 } bool jtag_run(struct jtag_port *jp) { return jtag_clock(jp, false, false, NULL); } bfgminer-bfgminer-3.10.0/jtag.h000066400000000000000000000034201226556647300162700ustar00rootroot00000000000000/* * Copyright 2012 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef BFGMINER_JTAG_H #define BFGMINER_JTAG_H #include #include #include struct jtag_port_a { struct ft232r_device_handle *ftdi; uint8_t state; bool async; uint8_t bufread; }; struct jtag_port { struct jtag_port_a *a; uint8_t tck; uint8_t tms; uint8_t tdi; uint8_t tdo; uint8_t ignored; }; enum jtagreg { JTAG_REG_DR, JTAG_REG_IR, }; extern bool jtag_clock(struct jtag_port *, bool tms, bool tdi, bool *tdo); extern bool _jtag_llrw(struct jtag_port *, void *buf, size_t bitlength, bool do_read, int stage); extern bool jtag_reset(struct jtag_port *); extern ssize_t jtag_detect(struct jtag_port *); extern bool _jtag_rw(struct jtag_port *, enum jtagreg r, void *buf, size_t bitlength, bool do_read, int stage); #define jtag_read(jp, r, data, bitlen) _jtag_rw(jp, r, data, bitlen, true, 0xff) #define jtag_sread(jp, r, data, bitlen) _jtag_rw(jp, r, data, bitlen, true, 1) #define jtag_sread_more(jp, data, bitlen, finish) _jtag_llrw(jp, data, bitlen, true, (finish) ? 2 : 0) // Cast is used to accept const data - while it ignores the compiler attribute, it still won't modify the data #define jtag_write(jp, r, data, bitlen) _jtag_rw(jp, r, (void*)data, bitlen, false, 0xff) #define jtag_swrite(jp, r, data, bitlen) _jtag_rw(jp, r, (void*)data, bitlen, false, 1) #define jtag_swrite_more(jp, data, bitlen, finish) _jtag_llrw(jp, (void*)data, bitlen, false, (finish) ? 2 : 0) extern bool jtag_run(struct jtag_port *); #endif bfgminer-bfgminer-3.10.0/lib/000077500000000000000000000000001226556647300157415ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/lib/Makefile.am000066400000000000000000000336661226556647300200130ustar00rootroot00000000000000## DO NOT EDIT! GENERATED AUTOMATICALLY! ## Process this file with automake to produce Makefile.in. # Copyright (C) 2002-2011 Free Software Foundation, Inc. # # This file is free software, distributed under the terms of the GNU # General Public License. As a special exception to the GNU General # Public License, this file may be distributed as part of a program # that contains a configuration script generated by Autoconf, under # the same distribution terms as the rest of that program. # # Generated by gnulib-tool. # Reproduce by: gnulib-tool --import --dir=. --lib=libgnu --source-base=lib --m4-base=m4 --doc-base=doc --tests-base=tests --aux-dir=. 
--no-conditional-dependencies --no-libtool --macro-prefix=gl --no-vc-files memmem sigaction signal strtok_r AUTOMAKE_OPTIONS = 1.5 gnits SUBDIRS = noinst_HEADERS = noinst_LIBRARIES = noinst_LTLIBRARIES = EXTRA_DIST = BUILT_SOURCES = SUFFIXES = MOSTLYCLEANFILES = core *.stackdump MOSTLYCLEANDIRS = CLEANFILES = DISTCLEANFILES = MAINTAINERCLEANFILES = AM_CPPFLAGS = AM_CFLAGS = noinst_LIBRARIES += libgnu.a libgnu_a_SOURCES = libgnu_a_LIBADD = $(gl_LIBOBJS) libgnu_a_DEPENDENCIES = $(gl_LIBOBJS) EXTRA_libgnu_a_SOURCES = ## begin gnulib module arg-nonnull # The BUILT_SOURCES created by this Makefile snippet are not used via #include # statements but through direct file reference. Therefore this snippet must be # present in all Makefile.am that need it. This is ensured by the applicability # 'all' defined above. BUILT_SOURCES += arg-nonnull.h # The arg-nonnull.h that gets inserted into generated .h files is the same as # build-aux/arg-nonnull.h, except that it has the copyright header cut off. arg-nonnull.h: $(top_srcdir)/./arg-nonnull.h $(AM_V_GEN)rm -f $@-t $@ && \ sed -n -e '/GL_ARG_NONNULL/,$$p' \ < $(top_srcdir)/./arg-nonnull.h \ > $@-t && \ mv $@-t $@ MOSTLYCLEANFILES += arg-nonnull.h arg-nonnull.h-t ARG_NONNULL_H=arg-nonnull.h EXTRA_DIST += $(top_srcdir)/./arg-nonnull.h ## end gnulib module arg-nonnull ## begin gnulib module c++defs # The BUILT_SOURCES created by this Makefile snippet are not used via #include # statements but through direct file reference. Therefore this snippet must be # present in all Makefile.am that need it. This is ensured by the applicability # 'all' defined above. BUILT_SOURCES += c++defs.h # The c++defs.h that gets inserted into generated .h files is the same as # build-aux/c++defs.h, except that it has the copyright header cut off. c++defs.h: $(top_srcdir)/./c++defs.h $(AM_V_GEN)rm -f $@-t $@ && \ sed -n -e '/_GL_CXXDEFS/,$$p' \ < $(top_srcdir)/./c++defs.h \ > $@-t && \ mv $@-t $@ MOSTLYCLEANFILES += c++defs.h c++defs.h-t CXXDEFS_H=c++defs.h EXTRA_DIST += $(top_srcdir)/./c++defs.h ## end gnulib module c++defs ## begin gnulib module memchr EXTRA_DIST += memchr.c memchr.valgrind EXTRA_libgnu_a_SOURCES += memchr.c ## end gnulib module memchr ## begin gnulib module memmem-simple EXTRA_DIST += memmem.c str-two-way.h EXTRA_libgnu_a_SOURCES += memmem.c ## end gnulib module memmem-simple ## begin gnulib module sigaction EXTRA_DIST += sig-handler.h sigaction.c EXTRA_libgnu_a_SOURCES += sigaction.c ## end gnulib module sigaction ## begin gnulib module signal BUILT_SOURCES += signal.h # We need the following in order to create when the system # doesn't have a complete one. signal.h: signal.in.h $(top_builddir)/config.status $(CXXDEFS_H) $(ARG_NONNULL_H) $(WARN_ON_USE_H) $(AM_V_GEN)rm -f $@-t $@ && \ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/' && \ sed -e 's|@''GUARD_PREFIX''@|GL|g' \ -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ -e 's|@''NEXT_SIGNAL_H''@|$(NEXT_SIGNAL_H)|g' \ -e 's/@''GNULIB_SIGNAL_H_SIGPIPE''@/$(GNULIB_SIGNAL_H_SIGPIPE)/g' \ -e 's/@''GNULIB_SIGPROCMASK''@/$(GNULIB_SIGPROCMASK)/g' \ -e 's/@''GNULIB_SIGACTION''@/$(GNULIB_SIGACTION)/g' \ -e 's|@''HAVE_POSIX_SIGNALBLOCKING''@|$(HAVE_POSIX_SIGNALBLOCKING)|g' \ -e 's|@''HAVE_SIGSET_T''@|$(HAVE_SIGSET_T)|g' \ -e 's|@''HAVE_SIGINFO_T''@|$(HAVE_SIGINFO_T)|g' \ -e 's|@''HAVE_SIGACTION''@|$(HAVE_SIGACTION)|g' \ -e 's|@''HAVE_STRUCT_SIGACTION_SA_SIGACTION''@|$(HAVE_STRUCT_SIGACTION_SA_SIGACTION)|g' \ -e 's|@''HAVE_TYPE_VOLATILE_SIG_ATOMIC_T''@|$(HAVE_TYPE_VOLATILE_SIG_ATOMIC_T)|g' \ -e 's|@''HAVE_SIGHANDLER_T''@|$(HAVE_SIGHANDLER_T)|g' \ -e '/definitions of _GL_FUNCDECL_RPL/r $(CXXDEFS_H)' \ -e '/definition of _GL_ARG_NONNULL/r $(ARG_NONNULL_H)' \ -e '/definition of _GL_WARN_ON_USE/r $(WARN_ON_USE_H)' \ < $(srcdir)/signal.in.h; \ } > $@-t && \ mv $@-t $@ MOSTLYCLEANFILES += signal.h signal.h-t EXTRA_DIST += signal.in.h ## end gnulib module signal ## begin gnulib module sigprocmask EXTRA_DIST += sigprocmask.c EXTRA_libgnu_a_SOURCES += sigprocmask.c ## end gnulib module sigprocmask ## begin gnulib module stddef BUILT_SOURCES += $(STDDEF_H) # We need the following in order to create when the system # doesn't have one that works with the given compiler. if GL_GENERATE_STDDEF_H stddef.h: stddef.in.h $(top_builddir)/config.status $(AM_V_GEN)rm -f $@-t $@ && \ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! */' && \ sed -e 's|@''GUARD_PREFIX''@|GL|g' \ -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ -e 's|@''NEXT_STDDEF_H''@|$(NEXT_STDDEF_H)|g' \ -e 's|@''HAVE_WCHAR_T''@|$(HAVE_WCHAR_T)|g' \ -e 's|@''REPLACE_NULL''@|$(REPLACE_NULL)|g' \ < $(srcdir)/stddef.in.h; \ } > $@-t && \ mv $@-t $@ else stddef.h: $(top_builddir)/config.status rm -f $@ endif MOSTLYCLEANFILES += stddef.h stddef.h-t EXTRA_DIST += stddef.in.h ## end gnulib module stddef ## begin gnulib module stdint BUILT_SOURCES += $(STDINT_H) # We need the following in order to create when the system # doesn't have one that works with the given compiler. if GL_GENERATE_STDINT_H stdint.h: stdint.in.h $(top_builddir)/config.status $(AM_V_GEN)rm -f $@-t $@ && \ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/'; \ sed -e 's|@''GUARD_PREFIX''@|GL|g' \ -e 's/@''HAVE_STDINT_H''@/$(HAVE_STDINT_H)/g' \ -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ -e 's|@''NEXT_STDINT_H''@|$(NEXT_STDINT_H)|g' \ -e 's/@''HAVE_SYS_TYPES_H''@/$(HAVE_SYS_TYPES_H)/g' \ -e 's/@''HAVE_INTTYPES_H''@/$(HAVE_INTTYPES_H)/g' \ -e 's/@''HAVE_SYS_INTTYPES_H''@/$(HAVE_SYS_INTTYPES_H)/g' \ -e 's/@''HAVE_SYS_BITYPES_H''@/$(HAVE_SYS_BITYPES_H)/g' \ -e 's/@''HAVE_WCHAR_H''@/$(HAVE_WCHAR_H)/g' \ -e 's/@''HAVE_LONG_LONG_INT''@/$(HAVE_LONG_LONG_INT)/g' \ -e 's/@''HAVE_UNSIGNED_LONG_LONG_INT''@/$(HAVE_UNSIGNED_LONG_LONG_INT)/g' \ -e 's/@''APPLE_UNIVERSAL_BUILD''@/$(APPLE_UNIVERSAL_BUILD)/g' \ -e 's/@''BITSIZEOF_PTRDIFF_T''@/$(BITSIZEOF_PTRDIFF_T)/g' \ -e 's/@''PTRDIFF_T_SUFFIX''@/$(PTRDIFF_T_SUFFIX)/g' \ -e 's/@''BITSIZEOF_SIG_ATOMIC_T''@/$(BITSIZEOF_SIG_ATOMIC_T)/g' \ -e 's/@''HAVE_SIGNED_SIG_ATOMIC_T''@/$(HAVE_SIGNED_SIG_ATOMIC_T)/g' \ -e 's/@''SIG_ATOMIC_T_SUFFIX''@/$(SIG_ATOMIC_T_SUFFIX)/g' \ -e 's/@''BITSIZEOF_SIZE_T''@/$(BITSIZEOF_SIZE_T)/g' \ -e 's/@''SIZE_T_SUFFIX''@/$(SIZE_T_SUFFIX)/g' \ -e 's/@''BITSIZEOF_WCHAR_T''@/$(BITSIZEOF_WCHAR_T)/g' \ -e 's/@''HAVE_SIGNED_WCHAR_T''@/$(HAVE_SIGNED_WCHAR_T)/g' \ -e 's/@''WCHAR_T_SUFFIX''@/$(WCHAR_T_SUFFIX)/g' \ -e 's/@''BITSIZEOF_WINT_T''@/$(BITSIZEOF_WINT_T)/g' \ -e 's/@''HAVE_SIGNED_WINT_T''@/$(HAVE_SIGNED_WINT_T)/g' \ -e 's/@''WINT_T_SUFFIX''@/$(WINT_T_SUFFIX)/g' \ < $(srcdir)/stdint.in.h; \ } > $@-t && \ mv $@-t $@ else stdint.h: $(top_builddir)/config.status rm -f $@ endif MOSTLYCLEANFILES += stdint.h stdint.h-t EXTRA_DIST += stdint.in.h ## end gnulib module stdint ## begin gnulib module string BUILT_SOURCES += string.h # We need the following in order to create when the system # doesn't have one that works with the given compiler. string.h: string.in.h $(top_builddir)/config.status $(CXXDEFS_H) $(ARG_NONNULL_H) $(WARN_ON_USE_H) $(AM_V_GEN)rm -f $@-t $@ && \ { echo '/* DO NOT EDIT! GENERATED AUTOMATICALLY! 
*/' && \ sed -e 's|@''GUARD_PREFIX''@|GL|g' \ -e 's|@''INCLUDE_NEXT''@|$(INCLUDE_NEXT)|g' \ -e 's|@''PRAGMA_SYSTEM_HEADER''@|@PRAGMA_SYSTEM_HEADER@|g' \ -e 's|@''PRAGMA_COLUMNS''@|@PRAGMA_COLUMNS@|g' \ -e 's|@''NEXT_STRING_H''@|$(NEXT_STRING_H)|g' \ -e 's/@''GNULIB_MBSLEN''@/$(GNULIB_MBSLEN)/g' \ -e 's/@''GNULIB_MBSNLEN''@/$(GNULIB_MBSNLEN)/g' \ -e 's/@''GNULIB_MBSCHR''@/$(GNULIB_MBSCHR)/g' \ -e 's/@''GNULIB_MBSRCHR''@/$(GNULIB_MBSRCHR)/g' \ -e 's/@''GNULIB_MBSSTR''@/$(GNULIB_MBSSTR)/g' \ -e 's/@''GNULIB_MBSCASECMP''@/$(GNULIB_MBSCASECMP)/g' \ -e 's/@''GNULIB_MBSNCASECMP''@/$(GNULIB_MBSNCASECMP)/g' \ -e 's/@''GNULIB_MBSPCASECMP''@/$(GNULIB_MBSPCASECMP)/g' \ -e 's/@''GNULIB_MBSCASESTR''@/$(GNULIB_MBSCASESTR)/g' \ -e 's/@''GNULIB_MBSCSPN''@/$(GNULIB_MBSCSPN)/g' \ -e 's/@''GNULIB_MBSPBRK''@/$(GNULIB_MBSPBRK)/g' \ -e 's/@''GNULIB_MBSSPN''@/$(GNULIB_MBSSPN)/g' \ -e 's/@''GNULIB_MBSSEP''@/$(GNULIB_MBSSEP)/g' \ -e 's/@''GNULIB_MBSTOK_R''@/$(GNULIB_MBSTOK_R)/g' \ -e 's/@''GNULIB_MEMCHR''@/$(GNULIB_MEMCHR)/g' \ -e 's/@''GNULIB_MEMMEM''@/$(GNULIB_MEMMEM)/g' \ -e 's/@''GNULIB_MEMPCPY''@/$(GNULIB_MEMPCPY)/g' \ -e 's/@''GNULIB_MEMRCHR''@/$(GNULIB_MEMRCHR)/g' \ -e 's/@''GNULIB_RAWMEMCHR''@/$(GNULIB_RAWMEMCHR)/g' \ -e 's/@''GNULIB_STPCPY''@/$(GNULIB_STPCPY)/g' \ -e 's/@''GNULIB_STPNCPY''@/$(GNULIB_STPNCPY)/g' \ -e 's/@''GNULIB_STRCHRNUL''@/$(GNULIB_STRCHRNUL)/g' \ -e 's/@''GNULIB_STRDUP''@/$(GNULIB_STRDUP)/g' \ -e 's/@''GNULIB_STRNCAT''@/$(GNULIB_STRNCAT)/g' \ -e 's/@''GNULIB_STRNDUP''@/$(GNULIB_STRNDUP)/g' \ -e 's/@''GNULIB_STRNLEN''@/$(GNULIB_STRNLEN)/g' \ -e 's/@''GNULIB_STRPBRK''@/$(GNULIB_STRPBRK)/g' \ -e 's/@''GNULIB_STRSEP''@/$(GNULIB_STRSEP)/g' \ -e 's/@''GNULIB_STRSTR''@/$(GNULIB_STRSTR)/g' \ -e 's/@''GNULIB_STRCASESTR''@/$(GNULIB_STRCASESTR)/g' \ -e 's/@''GNULIB_STRTOK_R''@/$(GNULIB_STRTOK_R)/g' \ -e 's/@''GNULIB_STRERROR''@/$(GNULIB_STRERROR)/g' \ -e 's/@''GNULIB_STRERROR_R''@/$(GNULIB_STRERROR_R)/g' \ -e 's/@''GNULIB_STRSIGNAL''@/$(GNULIB_STRSIGNAL)/g' \ -e 's/@''GNULIB_STRVERSCMP''@/$(GNULIB_STRVERSCMP)/g' \ < $(srcdir)/string.in.h | \ sed -e 's|@''HAVE_MBSLEN''@|$(HAVE_MBSLEN)|g' \ -e 's|@''HAVE_MEMCHR''@|$(HAVE_MEMCHR)|g' \ -e 's|@''HAVE_DECL_MEMMEM''@|$(HAVE_DECL_MEMMEM)|g' \ -e 's|@''HAVE_MEMPCPY''@|$(HAVE_MEMPCPY)|g' \ -e 's|@''HAVE_DECL_MEMRCHR''@|$(HAVE_DECL_MEMRCHR)|g' \ -e 's|@''HAVE_RAWMEMCHR''@|$(HAVE_RAWMEMCHR)|g' \ -e 's|@''HAVE_STPCPY''@|$(HAVE_STPCPY)|g' \ -e 's|@''HAVE_STPNCPY''@|$(HAVE_STPNCPY)|g' \ -e 's|@''HAVE_STRCHRNUL''@|$(HAVE_STRCHRNUL)|g' \ -e 's|@''HAVE_DECL_STRDUP''@|$(HAVE_DECL_STRDUP)|g' \ -e 's|@''HAVE_DECL_STRNDUP''@|$(HAVE_DECL_STRNDUP)|g' \ -e 's|@''HAVE_DECL_STRNLEN''@|$(HAVE_DECL_STRNLEN)|g' \ -e 's|@''HAVE_STRPBRK''@|$(HAVE_STRPBRK)|g' \ -e 's|@''HAVE_STRSEP''@|$(HAVE_STRSEP)|g' \ -e 's|@''HAVE_STRCASESTR''@|$(HAVE_STRCASESTR)|g' \ -e 's|@''HAVE_DECL_STRTOK_R''@|$(HAVE_DECL_STRTOK_R)|g' \ -e 's|@''HAVE_DECL_STRERROR_R''@|$(HAVE_DECL_STRERROR_R)|g' \ -e 's|@''HAVE_DECL_STRSIGNAL''@|$(HAVE_DECL_STRSIGNAL)|g' \ -e 's|@''HAVE_STRVERSCMP''@|$(HAVE_STRVERSCMP)|g' \ -e 's|@''REPLACE_STPNCPY''@|$(REPLACE_STPNCPY)|g' \ -e 's|@''REPLACE_MEMCHR''@|$(REPLACE_MEMCHR)|g' \ -e 's|@''REPLACE_MEMMEM''@|$(REPLACE_MEMMEM)|g' \ -e 's|@''REPLACE_STRCASESTR''@|$(REPLACE_STRCASESTR)|g' \ -e 's|@''REPLACE_STRCHRNUL''@|$(REPLACE_STRCHRNUL)|g' \ -e 's|@''REPLACE_STRDUP''@|$(REPLACE_STRDUP)|g' \ -e 's|@''REPLACE_STRSTR''@|$(REPLACE_STRSTR)|g' \ -e 's|@''REPLACE_STRERROR''@|$(REPLACE_STRERROR)|g' \ -e 's|@''REPLACE_STRERROR_R''@|$(REPLACE_STRERROR_R)|g' \ -e 
's|@''REPLACE_STRNCAT''@|$(REPLACE_STRNCAT)|g' \ -e 's|@''REPLACE_STRNDUP''@|$(REPLACE_STRNDUP)|g' \ -e 's|@''REPLACE_STRNLEN''@|$(REPLACE_STRNLEN)|g' \ -e 's|@''REPLACE_STRSIGNAL''@|$(REPLACE_STRSIGNAL)|g' \ -e 's|@''REPLACE_STRTOK_R''@|$(REPLACE_STRTOK_R)|g' \ -e 's|@''UNDEFINE_STRTOK_R''@|$(UNDEFINE_STRTOK_R)|g' \ -e '/definitions of _GL_FUNCDECL_RPL/r $(CXXDEFS_H)' \ -e '/definition of _GL_ARG_NONNULL/r $(ARG_NONNULL_H)' \ -e '/definition of _GL_WARN_ON_USE/r $(WARN_ON_USE_H)'; \ < $(srcdir)/string.in.h; \ } > $@-t && \ mv $@-t $@ MOSTLYCLEANFILES += string.h string.h-t EXTRA_DIST += string.in.h ## end gnulib module string ## begin gnulib module strtok_r EXTRA_DIST += strtok_r.c EXTRA_libgnu_a_SOURCES += strtok_r.c ## end gnulib module strtok_r ## begin gnulib module warn-on-use BUILT_SOURCES += warn-on-use.h # The warn-on-use.h that gets inserted into generated .h files is the same as # build-aux/warn-on-use.h, except that it has the copyright header cut off. warn-on-use.h: $(top_srcdir)/./warn-on-use.h $(AM_V_GEN)rm -f $@-t $@ && \ sed -n -e '/^.ifndef/,$$p' \ < $(top_srcdir)/./warn-on-use.h \ > $@-t && \ mv $@-t $@ MOSTLYCLEANFILES += warn-on-use.h warn-on-use.h-t WARN_ON_USE_H=warn-on-use.h EXTRA_DIST += $(top_srcdir)/./warn-on-use.h ## end gnulib module warn-on-use ## begin gnulib module dummy libgnu_a_SOURCES += dummy.c ## end gnulib module dummy mostlyclean-local: mostlyclean-generic @for dir in '' $(MOSTLYCLEANDIRS); do \ if test -n "$$dir" && test -d $$dir; then \ echo "rmdir $$dir"; rmdir $$dir; \ fi; \ done; \ : bfgminer-bfgminer-3.10.0/lib/dummy.c000066400000000000000000000032541226556647300172440ustar00rootroot00000000000000/* A dummy file, to prevent empty libraries from breaking builds. Copyright (C) 2004, 2007, 2009-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* Some systems, reportedly OpenBSD and Mac OS X, refuse to create libraries without any object files. You might get an error like: > ar cru .libs/libgl.a > ar: no archive members specified Compiling this file, and adding its object file to the library, will prevent the library from being empty. */ /* Some systems, such as Solaris with cc 5.0, refuse to work with libraries that don't export any symbol. You might get an error like: > cc ... libgnu.a > ild: (bad file) garbled symbol table in archive ../gllib/libgnu.a Compiling this file, and adding its object file to the library, will prevent the library from exporting no symbols. */ #ifdef __sun /* This declaration ensures that the library will export at least 1 symbol. */ int gl_dummy_symbol; #else /* This declaration is solely to ensure that after preprocessing this file is never empty. */ typedef int dummy; #endif bfgminer-bfgminer-3.10.0/lib/memchr.c000066400000000000000000000133461226556647300173670ustar00rootroot00000000000000/* Copyright (C) 1991, 1993, 1996-1997, 1999-2000, 2003-2004, 2006, 2008-2011 Free Software Foundation, Inc. 
Based on strlen implementation by Torbjorn Granlund (tege@sics.se), with help from Dan Sahlin (dan@sics.se) and commentary by Jim Blandy (jimb@ai.mit.edu); adaptation to memchr suggested by Dick Karpinski (dick@cca.ucsf.edu), and implemented by Roland McGrath (roland@ai.mit.edu). NOTE: The canonical source of this file is maintained with the GNU C Library. Bugs can be reported to bug-glibc@prep.ai.mit.edu. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #ifndef _LIBC # include #endif #include #include #if defined _LIBC # include #else # define reg_char char #endif #include #if HAVE_BP_SYM_H || defined _LIBC # include #else # define BP_SYM(sym) sym #endif #undef __memchr #ifdef _LIBC # undef memchr #endif #ifndef weak_alias # define __memchr memchr #endif /* Search no more than N bytes of S for C. */ void * __memchr (void const *s, int c_in, size_t n) { /* On 32-bit hardware, choosing longword to be a 32-bit unsigned long instead of a 64-bit uintmax_t tends to give better performance. On 64-bit hardware, unsigned long is generally 64 bits already. Change this typedef to experiment with performance. */ typedef unsigned long int longword; const unsigned char *char_ptr; const longword *longword_ptr; longword repeated_one; longword repeated_c; unsigned reg_char c; c = (unsigned char) c_in; /* Handle the first few bytes by reading one byte at a time. Do this until CHAR_PTR is aligned on a longword boundary. */ for (char_ptr = (const unsigned char *) s; n > 0 && (size_t) char_ptr % sizeof (longword) != 0; --n, ++char_ptr) if (*char_ptr == c) return (void *) char_ptr; longword_ptr = (const longword *) char_ptr; /* All these elucidatory comments refer to 4-byte longwords, but the theory applies equally well to any size longwords. */ /* Compute auxiliary longword values: repeated_one is a value which has a 1 in every byte. repeated_c has c in every byte. */ repeated_one = 0x01010101; repeated_c = c | (c << 8); repeated_c |= repeated_c << 16; if (0xffffffffU < (longword) -1) { repeated_one |= repeated_one << 31 << 1; repeated_c |= repeated_c << 31 << 1; if (8 < sizeof (longword)) { size_t i; for (i = 64; i < sizeof (longword) * 8; i *= 2) { repeated_one |= repeated_one << i; repeated_c |= repeated_c << i; } } } /* Instead of the traditional loop which tests each byte, we will test a longword at a time. The tricky part is testing if *any of the four* bytes in the longword in question are equal to c. We first use an xor with repeated_c. This reduces the task to testing whether *any of the four* bytes in longword1 is zero. We compute tmp = ((longword1 - repeated_one) & ~longword1) & (repeated_one << 7). That is, we perform the following operations: 1. Subtract repeated_one. 2. & ~longword1. 3. & a mask consisting of 0x80 in every byte. Consider what happens in each byte: - If a byte of longword1 is zero, step 1 and 2 transform it into 0xff, and step 3 transforms it into 0x80. A carry can also be propagated to more significant bytes. 
- If a byte of longword1 is nonzero, let its lowest 1 bit be at position k (0 <= k <= 7); so the lowest k bits are 0. After step 1, the byte ends in a single bit of value 0 and k bits of value 1. After step 2, the result is just k bits of value 1: 2^k - 1. After step 3, the result is 0. And no carry is produced. So, if longword1 has only non-zero bytes, tmp is zero. Whereas if longword1 has a zero byte, call j the position of the least significant zero byte. Then the result has a zero at positions 0, ..., j-1 and a 0x80 at position j. We cannot predict the result at the more significant bytes (positions j+1..3), but it does not matter since we already have a non-zero bit at position 8*j+7. So, the test whether any byte in longword1 is zero is equivalent to testing whether tmp is nonzero. */ while (n >= sizeof (longword)) { longword longword1 = *longword_ptr ^ repeated_c; if ((((longword1 - repeated_one) & ~longword1) & (repeated_one << 7)) != 0) break; longword_ptr++; n -= sizeof (longword); } char_ptr = (const unsigned char *) longword_ptr; /* At this point, we know that either n < sizeof (longword), or one of the sizeof (longword) bytes starting at char_ptr is == c. On little-endian machines, we could determine the first such byte without any further memory accesses, just by looking at the tmp result from the last loop iteration. But this does not work on big-endian machines. Choose code that works in both cases. */ for (; n > 0; --n, ++char_ptr) { if (*char_ptr == c) return (void *) char_ptr; } return NULL; } #ifdef weak_alias weak_alias (__memchr, BP_SYM (memchr)) #endif bfgminer-bfgminer-3.10.0/lib/memchr.valgrind000066400000000000000000000006521226556647300207470ustar00rootroot00000000000000# Suppress a valgrind message about use of uninitialized memory in memchr(). # POSIX states that when the character is found, memchr must not read extra # bytes in an overestimated length (for example, where memchr is used to # implement strnlen). However, we use a safe word read to provide a speedup. { memchr-value4 Memcheck:Value4 fun:rpl_memchr } { memchr-value8 Memcheck:Value8 fun:rpl_memchr } bfgminer-bfgminer-3.10.0/lib/memmem.c000066400000000000000000000054551226556647300173730ustar00rootroot00000000000000/* Copyright (C) 1991-1994, 1996-1998, 2000, 2004, 2007-2011 Free Software Foundation, Inc. This file is part of the GNU C Library. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* This particular implementation was written by Eric Blake, 2008. */ #ifndef _LIBC # include #endif /* Specification of memmem. */ #include #ifndef _LIBC # define __builtin_expect(expr, val) (expr) #endif #define RETURN_TYPE void * #define AVAILABLE(h, h_l, j, n_l) ((j) <= (h_l) - (n_l)) #include "str-two-way.h" /* Return the first occurrence of NEEDLE in HAYSTACK. Return HAYSTACK if NEEDLE_LEN is 0, otherwise NULL if NEEDLE is not found in HAYSTACK. 
*/ void * memmem (const void *haystack_start, size_t haystack_len, const void *needle_start, size_t needle_len) { /* Abstract memory is considered to be an array of 'unsigned char' values, not an array of 'char' values. See ISO C 99 section 6.2.6.1. */ const unsigned char *haystack = (const unsigned char *) haystack_start; const unsigned char *needle = (const unsigned char *) needle_start; if (needle_len == 0) /* The first occurrence of the empty string is deemed to occur at the beginning of the string. */ return (void *) haystack; /* Sanity check, otherwise the loop might search through the whole memory. */ if (__builtin_expect (haystack_len < needle_len, 0)) return NULL; /* Use optimizations in memchr when possible, to reduce the search size of haystack using a linear algorithm with a smaller coefficient. However, avoid memchr for long needles, since we can often achieve sublinear performance. */ if (needle_len < LONG_NEEDLE_THRESHOLD) { haystack = memchr (haystack, *needle, haystack_len); if (!haystack || __builtin_expect (needle_len == 1, 0)) return (void *) haystack; haystack_len -= haystack - (const unsigned char *) haystack_start; if (haystack_len < needle_len) return NULL; return two_way_short_needle (haystack, haystack_len, needle, needle_len); } else return two_way_long_needle (haystack, haystack_len, needle, needle_len); } #undef LONG_NEEDLE_THRESHOLD bfgminer-bfgminer-3.10.0/lib/sig-handler.h000066400000000000000000000032361226556647300203130ustar00rootroot00000000000000/* Convenience declarations when working with . Copyright (C) 2008-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #ifndef _GL_SIG_HANDLER_H #define _GL_SIG_HANDLER_H #include /* Convenience type when working with signal handlers. */ typedef void (*sa_handler_t) (int); /* Return the handler of a signal, as a sa_handler_t value regardless of its true type. The resulting function can be compared to special values like SIG_IGN but it is not portable to call it. */ static inline sa_handler_t get_handler (struct sigaction const *a) { #ifdef SA_SIGINFO /* POSIX says that special values like SIG_IGN can only occur when action.sa_flags does not contain SA_SIGINFO. But in Linux 2.4, for example, sa_sigaction and sa_handler are aliases and a signal is ignored if sa_sigaction (after casting) equals SIG_IGN. So use (and cast) sa_sigaction in that case. */ if (a->sa_flags & SA_SIGINFO) return (sa_handler_t) a->sa_sigaction; #endif return a->sa_handler; } #endif /* _GL_SIG_HANDLER_H */ bfgminer-bfgminer-3.10.0/lib/sigaction.c000066400000000000000000000161151226556647300200710ustar00rootroot00000000000000/* POSIX compatible signal blocking. Copyright (C) 2008-2011 Free Software Foundation, Inc. Written by Eric Blake , 2008. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #include /* Specification. */ #include #include #include #include /* This implementation of sigaction is tailored to Woe32 behavior: signal() has SysV semantics (ie. the handler is uninstalled before it is invoked). This is an inherent data race if an asynchronous signal is sent twice in a row before we can reinstall our handler, but there's nothing we can do about it. Meanwhile, sigprocmask() is not present, and while we can use the gnulib replacement to provide critical sections, it too suffers from potential data races in the face of an ill-timed asynchronous signal. And we compound the situation by reading static storage in a signal handler, which POSIX warns is not generically async-signal-safe. Oh well. Additionally: - We don't implement SA_NOCLDSTOP or SA_NOCLDWAIT, because SIGCHLD is not defined. - We don't implement SA_ONSTACK, because sigaltstack() is not present. - We ignore SA_RESTART, because blocking Win32 calls are not interrupted anyway when an asynchronous signal occurs, and the MSVCRT runtime never sets errno to EINTR. - We don't implement SA_SIGINFO because it is impossible to do so portably. POSIX states that an application should not mix signal() and sigaction(). We support the use of signal() within the gnulib sigprocmask() substitute, but all other application code linked with this module should stick with only sigaction(). */ /* Check some of our assumptions. */ #if defined SIGCHLD || defined HAVE_SIGALTSTACK || defined HAVE_SIGINTERRUPT # error "Revisit the assumptions made in the sigaction module" #endif /* Out-of-range substitutes make a good fallback for uncatchable signals. */ #ifndef SIGKILL # define SIGKILL (-1) #endif #ifndef SIGSTOP # define SIGSTOP (-1) #endif /* On native Windows, as of 2008, the signal SIGABRT_COMPAT is an alias for the signal SIGABRT. Only one signal handler is stored for both SIGABRT and SIGABRT_COMPAT. SIGABRT_COMPAT is not a signal of its own. */ #if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ # undef SIGABRT_COMPAT # define SIGABRT_COMPAT 6 #endif /* A signal handler. */ typedef void (*handler_t) (int signal); /* Set of current actions. If sa_handler for an entry is NULL, then that signal is not currently handled by the sigaction handler. */ static struct sigaction volatile action_array[NSIG] /* = 0 */; /* Signal handler that is installed for signals. */ static void sigaction_handler (int sig) { handler_t handler; sigset_t mask; sigset_t oldmask; int saved_errno = errno; if (sig < 0 || NSIG <= sig || !action_array[sig].sa_handler) { /* Unexpected situation; be careful to avoid recursive abort. */ if (sig == SIGABRT) signal (SIGABRT, SIG_DFL); abort (); } /* Reinstall the signal handler when required; otherwise update the bookkeeping so that the user's handler may call sigaction and get accurate results. We know the signal isn't currently blocked, or we wouldn't be in its handler, therefore we know that we are not interrupting a sigaction() call. There is a race where any asynchronous instance of the same signal occurring before we reinstall the handler will trigger the default handler; oh well. 
*/ handler = action_array[sig].sa_handler; if ((action_array[sig].sa_flags & SA_RESETHAND) == 0) signal (sig, sigaction_handler); else action_array[sig].sa_handler = NULL; /* Block appropriate signals. */ mask = action_array[sig].sa_mask; if ((action_array[sig].sa_flags & SA_NODEFER) == 0) sigaddset (&mask, sig); sigprocmask (SIG_BLOCK, &mask, &oldmask); /* Invoke the user's handler, then restore prior mask. */ errno = saved_errno; handler (sig); saved_errno = errno; sigprocmask (SIG_SETMASK, &oldmask, NULL); errno = saved_errno; } /* Change and/or query the action that will be taken on delivery of signal SIG. If not NULL, ACT describes the new behavior. If not NULL, OACT is set to the prior behavior. Return 0 on success, or set errno and return -1 on failure. */ int sigaction (int sig, const struct sigaction *restrict act, struct sigaction *restrict oact) { sigset_t mask; sigset_t oldmask; int saved_errno; if (sig < 0 || NSIG <= sig || sig == SIGKILL || sig == SIGSTOP || (act && act->sa_handler == SIG_ERR)) { errno = EINVAL; return -1; } #ifdef SIGABRT_COMPAT if (sig == SIGABRT_COMPAT) sig = SIGABRT; #endif /* POSIX requires sigaction() to be async-signal-safe. In other words, if an asynchronous signal can occur while we are anywhere inside this function, the user's handler could then call sigaction() recursively and expect consistent results. We meet this rule by using sigprocmask to block all signals before modifying any data structure that could be read from a signal handler; this works since we know that the gnulib sigprocmask replacement does not try to use sigaction() from its handler. */ if (!act && !oact) return 0; sigfillset (&mask); sigprocmask (SIG_BLOCK, &mask, &oldmask); if (oact) { if (action_array[sig].sa_handler) *oact = action_array[sig]; else { /* Safe to change the handler at will here, since all signals are currently blocked. */ oact->sa_handler = signal (sig, SIG_DFL); if (oact->sa_handler == SIG_ERR) goto failure; signal (sig, oact->sa_handler); oact->sa_flags = SA_RESETHAND | SA_NODEFER; sigemptyset (&oact->sa_mask); } } if (act) { /* Safe to install the handler before updating action_array, since all signals are currently blocked. */ if (act->sa_handler == SIG_DFL || act->sa_handler == SIG_IGN) { if (signal (sig, act->sa_handler) == SIG_ERR) goto failure; action_array[sig].sa_handler = NULL; } else { if (signal (sig, sigaction_handler) == SIG_ERR) goto failure; action_array[sig] = *act; } } sigprocmask (SIG_SETMASK, &oldmask, NULL); return 0; failure: saved_errno = errno; sigprocmask (SIG_SETMASK, &oldmask, NULL); errno = saved_errno; return -1; } bfgminer-bfgminer-3.10.0/lib/signal.in.h000066400000000000000000000262401226556647300200000ustar00rootroot00000000000000/* A GNU-like . Copyright (C) 2006-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . 
*/ #if __GNUC__ >= 3 @PRAGMA_SYSTEM_HEADER@ #endif @PRAGMA_COLUMNS@ #include "config.h" #if defined __need_sig_atomic_t || defined __need_sigset_t /* Special invocation convention inside glibc header files. */ # @INCLUDE_NEXT@ @NEXT_SIGNAL_H@ #else /* Normal invocation convention. */ #ifndef _@GUARD_PREFIX@_SIGNAL_H /* The include_next requires a split double-inclusion guard. */ #@INCLUDE_NEXT@ @NEXT_SIGNAL_H@ #ifndef _@GUARD_PREFIX@_SIGNAL_H #define _@GUARD_PREFIX@_SIGNAL_H /* The definitions of _GL_FUNCDECL_RPL etc. are copied here. */ /* The definition of _GL_ARG_NONNULL is copied here. */ /* The definition of _GL_WARN_ON_USE is copied here. */ /* Define pid_t, uid_t. Also, mingw defines sigset_t not in , but in . */ #include /* On AIX, sig_atomic_t already includes volatile. C99 requires that 'volatile sig_atomic_t' ignore the extra modifier, but C89 did not. Hence, redefine this to a non-volatile type as needed. */ #if ! @HAVE_TYPE_VOLATILE_SIG_ATOMIC_T@ # if !GNULIB_defined_sig_atomic_t typedef int rpl_sig_atomic_t; # undef sig_atomic_t # define sig_atomic_t rpl_sig_atomic_t # define GNULIB_defined_sig_atomic_t 1 # endif #endif /* A set or mask of signals. */ #if !@HAVE_SIGSET_T@ # if !GNULIB_defined_sigset_t typedef unsigned int sigset_t; # define GNULIB_defined_sigset_t 1 # endif #endif /* Define sighandler_t, the type of signal handlers. A GNU extension. */ #if !@HAVE_SIGHANDLER_T@ # ifdef __cplusplus extern "C" { # endif # if !GNULIB_defined_sighandler_t typedef void (*sighandler_t) (int); # define GNULIB_defined_sighandler_t 1 # endif # ifdef __cplusplus } # endif #endif #if @GNULIB_SIGNAL_H_SIGPIPE@ # ifndef SIGPIPE /* Define SIGPIPE to a value that does not overlap with other signals. */ # define SIGPIPE 13 # define GNULIB_defined_SIGPIPE 1 /* To actually use SIGPIPE, you also need the gnulib modules 'sigprocmask', 'write', 'stdio'. */ # endif #endif /* Maximum signal number + 1. */ #ifndef NSIG # if defined __TANDEM # define NSIG 32 # endif #endif #if @GNULIB_SIGPROCMASK@ # if !@HAVE_POSIX_SIGNALBLOCKING@ /* Maximum signal number + 1. */ # ifndef NSIG # define NSIG 32 # endif /* This code supports only 32 signals. */ # if !GNULIB_defined_verify_NSIG_constraint typedef int verify_NSIG_constraint[NSIG <= 32 ? 1 : -1]; # define GNULIB_defined_verify_NSIG_constraint 1 # endif # endif /* Test whether a given signal is contained in a signal set. */ # if @HAVE_POSIX_SIGNALBLOCKING@ /* This function is defined as a macro on MacOS X. */ # if defined __cplusplus && defined GNULIB_NAMESPACE # undef sigismember # endif # else _GL_FUNCDECL_SYS (sigismember, int, (const sigset_t *set, int sig) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigismember, int, (const sigset_t *set, int sig)); _GL_CXXALIASWARN (sigismember); /* Initialize a signal set to the empty set. */ # if @HAVE_POSIX_SIGNALBLOCKING@ /* This function is defined as a macro on MacOS X. */ # if defined __cplusplus && defined GNULIB_NAMESPACE # undef sigemptyset # endif # else _GL_FUNCDECL_SYS (sigemptyset, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigemptyset, int, (sigset_t *set)); _GL_CXXALIASWARN (sigemptyset); /* Add a signal to a signal set. */ # if @HAVE_POSIX_SIGNALBLOCKING@ /* This function is defined as a macro on MacOS X. 
*/ # if defined __cplusplus && defined GNULIB_NAMESPACE # undef sigaddset # endif # else _GL_FUNCDECL_SYS (sigaddset, int, (sigset_t *set, int sig) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigaddset, int, (sigset_t *set, int sig)); _GL_CXXALIASWARN (sigaddset); /* Remove a signal from a signal set. */ # if @HAVE_POSIX_SIGNALBLOCKING@ /* This function is defined as a macro on MacOS X. */ # if defined __cplusplus && defined GNULIB_NAMESPACE # undef sigdelset # endif # else _GL_FUNCDECL_SYS (sigdelset, int, (sigset_t *set, int sig) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigdelset, int, (sigset_t *set, int sig)); _GL_CXXALIASWARN (sigdelset); /* Fill a signal set with all possible signals. */ # if @HAVE_POSIX_SIGNALBLOCKING@ /* This function is defined as a macro on MacOS X. */ # if defined __cplusplus && defined GNULIB_NAMESPACE # undef sigfillset # endif # else _GL_FUNCDECL_SYS (sigfillset, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigfillset, int, (sigset_t *set)); _GL_CXXALIASWARN (sigfillset); /* Return the set of those blocked signals that are pending. */ # if !@HAVE_POSIX_SIGNALBLOCKING@ _GL_FUNCDECL_SYS (sigpending, int, (sigset_t *set) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (sigpending, int, (sigset_t *set)); _GL_CXXALIASWARN (sigpending); /* If OLD_SET is not NULL, put the current set of blocked signals in *OLD_SET. Then, if SET is not NULL, affect the current set of blocked signals by combining it with *SET as indicated in OPERATION. In this implementation, you are not allowed to change a signal handler while the signal is blocked. */ # if !@HAVE_POSIX_SIGNALBLOCKING@ # define SIG_BLOCK 0 /* blocked_set = blocked_set | *set; */ # define SIG_SETMASK 1 /* blocked_set = *set; */ # define SIG_UNBLOCK 2 /* blocked_set = blocked_set & ~*set; */ _GL_FUNCDECL_SYS (sigprocmask, int, (int operation, const sigset_t *set, sigset_t *old_set)); # endif _GL_CXXALIAS_SYS (sigprocmask, int, (int operation, const sigset_t *set, sigset_t *old_set)); _GL_CXXALIASWARN (sigprocmask); /* Install the handler FUNC for signal SIG, and return the previous handler. */ # ifdef __cplusplus extern "C" { # endif # if !GNULIB_defined_function_taking_int_returning_void_t typedef void (*_gl_function_taking_int_returning_void_t) (int); # define GNULIB_defined_function_taking_int_returning_void_t 1 # endif # ifdef __cplusplus } # endif # if !@HAVE_POSIX_SIGNALBLOCKING@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define signal rpl_signal # endif _GL_FUNCDECL_RPL (signal, _gl_function_taking_int_returning_void_t, (int sig, _gl_function_taking_int_returning_void_t func)); _GL_CXXALIAS_RPL (signal, _gl_function_taking_int_returning_void_t, (int sig, _gl_function_taking_int_returning_void_t func)); # else _GL_CXXALIAS_SYS (signal, _gl_function_taking_int_returning_void_t, (int sig, _gl_function_taking_int_returning_void_t func)); # endif _GL_CXXALIASWARN (signal); /* Raise signal SIG. 
*/ # if !@HAVE_POSIX_SIGNALBLOCKING@ && GNULIB_defined_SIGPIPE # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef raise # define raise rpl_raise # endif _GL_FUNCDECL_RPL (raise, int, (int sig)); _GL_CXXALIAS_RPL (raise, int, (int sig)); # else _GL_CXXALIAS_SYS (raise, int, (int sig)); # endif _GL_CXXALIASWARN (raise); #elif defined GNULIB_POSIXCHECK # undef sigaddset # if HAVE_RAW_DECL_SIGADDSET _GL_WARN_ON_USE (sigaddset, "sigaddset is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigdelset # if HAVE_RAW_DECL_SIGDELSET _GL_WARN_ON_USE (sigdelset, "sigdelset is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigemptyset # if HAVE_RAW_DECL_SIGEMPTYSET _GL_WARN_ON_USE (sigemptyset, "sigemptyset is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigfillset # if HAVE_RAW_DECL_SIGFILLSET _GL_WARN_ON_USE (sigfillset, "sigfillset is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigismember # if HAVE_RAW_DECL_SIGISMEMBER _GL_WARN_ON_USE (sigismember, "sigismember is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigpending # if HAVE_RAW_DECL_SIGPENDING _GL_WARN_ON_USE (sigpending, "sigpending is unportable - " "use the gnulib module sigprocmask for portability"); # endif # undef sigprocmask # if HAVE_RAW_DECL_SIGPROCMASK _GL_WARN_ON_USE (sigprocmask, "sigprocmask is unportable - " "use the gnulib module sigprocmask for portability"); # endif #endif /* @GNULIB_SIGPROCMASK@ */ #if @GNULIB_SIGACTION@ # if !@HAVE_SIGACTION@ # if !@HAVE_SIGINFO_T@ # if !GNULIB_defined_siginfo_types /* Present to allow compilation, but unsupported by gnulib. */ union sigval { int sival_int; void *sival_ptr; }; /* Present to allow compilation, but unsupported by gnulib. */ struct siginfo_t { int si_signo; int si_code; int si_errno; pid_t si_pid; uid_t si_uid; void *si_addr; int si_status; long si_band; union sigval si_value; }; typedef struct siginfo_t siginfo_t; # define GNULIB_defined_siginfo_types 1 # endif # endif /* !@HAVE_SIGINFO_T@ */ /* We assume that platforms which lack the sigaction() function also lack the 'struct sigaction' type, and vice versa. */ # if !GNULIB_defined_struct_sigaction struct sigaction { union { void (*_sa_handler) (int); /* Present to allow compilation, but unsupported by gnulib. POSIX says that implementations may, but not must, make sa_sigaction overlap with sa_handler, but we know of no implementation where they do not overlap. */ void (*_sa_sigaction) (int, siginfo_t *, void *); } _sa_func; sigset_t sa_mask; /* Not all POSIX flags are supported. */ int sa_flags; }; # define sa_handler _sa_func._sa_handler # define sa_sigaction _sa_func._sa_sigaction /* Unsupported flags are not present. 
*/ # define SA_RESETHAND 1 # define SA_NODEFER 2 # define SA_RESTART 4 # define GNULIB_defined_struct_sigaction 1 # endif _GL_FUNCDECL_SYS (sigaction, int, (int, const struct sigaction *restrict, struct sigaction *restrict)); # elif !@HAVE_STRUCT_SIGACTION_SA_SIGACTION@ # define sa_sigaction sa_handler # endif /* !@HAVE_SIGACTION@, !@HAVE_STRUCT_SIGACTION_SA_SIGACTION@ */ _GL_CXXALIAS_SYS (sigaction, int, (int, const struct sigaction *restrict, struct sigaction *restrict)); _GL_CXXALIASWARN (sigaction); #elif defined GNULIB_POSIXCHECK # undef sigaction # if HAVE_RAW_DECL_SIGACTION _GL_WARN_ON_USE (sigaction, "sigaction is unportable - " "use the gnulib module sigaction for portability"); # endif #endif /* Some systems don't have SA_NODEFER. */ #ifndef SA_NODEFER # define SA_NODEFER 0 #endif #endif /* _@GUARD_PREFIX@_SIGNAL_H */ #endif /* _@GUARD_PREFIX@_SIGNAL_H */ #endif bfgminer-bfgminer-3.10.0/lib/sigprocmask.c000066400000000000000000000201611226556647300204270ustar00rootroot00000000000000/* POSIX compatible signal blocking. Copyright (C) 2006-2011 Free Software Foundation, Inc. Written by Bruno Haible , 2006. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #include /* Specification. */ #include #include #include #include /* We assume that a platform without POSIX signal blocking functions also does not have the POSIX sigaction() function, only the signal() function. We also assume signal() has SysV semantics, where any handler is uninstalled prior to being invoked. This is true for Woe32 platforms. */ /* We use raw signal(), but also provide a wrapper rpl_signal() so that applications can query or change a blocked signal. */ #undef signal /* Provide invalid signal numbers as fallbacks if the uncatchable signals are not defined. */ #ifndef SIGKILL # define SIGKILL (-1) #endif #ifndef SIGSTOP # define SIGSTOP (-1) #endif /* On native Windows, as of 2008, the signal SIGABRT_COMPAT is an alias for the signal SIGABRT. Only one signal handler is stored for both SIGABRT and SIGABRT_COMPAT. SIGABRT_COMPAT is not a signal of its own. */ #if (defined _WIN32 || defined __WIN32__) && ! defined __CYGWIN__ # undef SIGABRT_COMPAT # define SIGABRT_COMPAT 6 #endif #ifdef SIGABRT_COMPAT # define SIGABRT_COMPAT_MASK (1U << SIGABRT_COMPAT) #else # define SIGABRT_COMPAT_MASK 0 #endif typedef void (*handler_t) (int); /* Handling of gnulib defined signals. 
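   When the platform lacks SIGPIPE, the gnulib <signal.h> replacement
   defines it (see signal.in.h); the ext_signal wrapper below then keeps
   the handler for that signal in SIGPIPE_handler so that rpl_raise can
   dispatch to it, while every other signal falls through to the system
   signal() function.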
*/ #if GNULIB_defined_SIGPIPE static handler_t SIGPIPE_handler = SIG_DFL; #endif #if GNULIB_defined_SIGPIPE static handler_t ext_signal (int sig, handler_t handler) { switch (sig) { case SIGPIPE: { handler_t old_handler = SIGPIPE_handler; SIGPIPE_handler = handler; return old_handler; } default: /* System defined signal */ return signal (sig, handler); } } # define signal ext_signal #endif int sigismember (const sigset_t *set, int sig) { if (sig >= 0 && sig < NSIG) { #ifdef SIGABRT_COMPAT if (sig == SIGABRT_COMPAT) sig = SIGABRT; #endif return (*set >> sig) & 1; } else return 0; } int sigemptyset (sigset_t *set) { *set = 0; return 0; } int sigaddset (sigset_t *set, int sig) { if (sig >= 0 && sig < NSIG) { #ifdef SIGABRT_COMPAT if (sig == SIGABRT_COMPAT) sig = SIGABRT; #endif *set |= 1U << sig; return 0; } else { errno = EINVAL; return -1; } } int sigdelset (sigset_t *set, int sig) { if (sig >= 0 && sig < NSIG) { #ifdef SIGABRT_COMPAT if (sig == SIGABRT_COMPAT) sig = SIGABRT; #endif *set &= ~(1U << sig); return 0; } else { errno = EINVAL; return -1; } } int sigfillset (sigset_t *set) { *set = ((2U << (NSIG - 1)) - 1) & ~ SIGABRT_COMPAT_MASK; return 0; } /* Set of currently blocked signals. */ static volatile sigset_t blocked_set /* = 0 */; /* Set of currently blocked and pending signals. */ static volatile sig_atomic_t pending_array[NSIG] /* = { 0 } */; /* Signal handler that is installed for blocked signals. */ static void blocked_handler (int sig) { /* Reinstall the handler, in case the signal occurs multiple times while blocked. There is an inherent race where an asynchronous signal in between when the kernel uninstalled the handler and when we reinstall it will trigger the default handler; oh well. */ signal (sig, blocked_handler); if (sig >= 0 && sig < NSIG) pending_array[sig] = 1; } int sigpending (sigset_t *set) { sigset_t pending = 0; int sig; for (sig = 0; sig < NSIG; sig++) if (pending_array[sig]) pending |= 1U << sig; *set = pending; return 0; } /* The previous signal handlers. Only the array elements corresponding to blocked signals are relevant. */ static volatile handler_t old_handlers[NSIG]; int sigprocmask (int operation, const sigset_t *set, sigset_t *old_set) { if (old_set != NULL) *old_set = blocked_set; if (set != NULL) { sigset_t new_blocked_set; sigset_t to_unblock; sigset_t to_block; switch (operation) { case SIG_BLOCK: new_blocked_set = blocked_set | *set; break; case SIG_SETMASK: new_blocked_set = *set; break; case SIG_UNBLOCK: new_blocked_set = blocked_set & ~*set; break; default: errno = EINVAL; return -1; } to_unblock = blocked_set & ~new_blocked_set; to_block = new_blocked_set & ~blocked_set; if (to_block != 0) { int sig; for (sig = 0; sig < NSIG; sig++) if ((to_block >> sig) & 1) { pending_array[sig] = 0; if ((old_handlers[sig] = signal (sig, blocked_handler)) != SIG_ERR) blocked_set |= 1U << sig; } } if (to_unblock != 0) { sig_atomic_t received[NSIG]; int sig; for (sig = 0; sig < NSIG; sig++) if ((to_unblock >> sig) & 1) { if (signal (sig, old_handlers[sig]) != blocked_handler) /* The application changed a signal handler while the signal was blocked, bypassing our rpl_signal replacement. We don't support this. */ abort (); received[sig] = pending_array[sig]; blocked_set &= ~(1U << sig); pending_array[sig] = 0; } else received[sig] = 0; for (sig = 0; sig < NSIG; sig++) if (received[sig]) raise (sig); } } return 0; } /* Install the handler FUNC for signal SIG, and return the previous handler. 
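   A minimal caller sketch (my_handler and the choice of SIGABRT are
   hypothetical, and this assumes the gnulib <signal.h> replacement maps
   signal() to this function):

     sigset_t set, old;
     sigemptyset (&set);
     sigaddset (&set, SIGABRT);
     sigprocmask (SIG_BLOCK, &set, &old);
     handler_t prev = signal (SIGABRT, my_handler);
     sigprocmask (SIG_SETMASK, &old, NULL);

   While SIGABRT is blocked, the call records my_handler in
   old_handlers[SIGABRT] and returns the previously recorded handler,
   leaving blocked_handler installed in the C runtime; for an unblocked
   signal it simply falls through to the underlying signal() call.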
*/ handler_t rpl_signal (int sig, handler_t handler) { /* We must provide a wrapper, so that a user can query what handler they installed even if that signal is currently blocked. */ if (sig >= 0 && sig < NSIG && sig != SIGKILL && sig != SIGSTOP && handler != SIG_ERR) { #ifdef SIGABRT_COMPAT if (sig == SIGABRT_COMPAT) sig = SIGABRT; #endif if (blocked_set & (1U << sig)) { /* POSIX states that sigprocmask and signal are both async-signal-safe. This is not true of our implementation - there is a slight data race where an asynchronous interrupt on signal A can occur after we install blocked_handler but before we have updated old_handlers for signal B, such that handler A can see stale information if it calls signal(B). Oh well - signal handlers really shouldn't try to manipulate the installed handlers of unrelated signals. */ handler_t result = old_handlers[sig]; old_handlers[sig] = handler; return result; } else return signal (sig, handler); } else { errno = EINVAL; return SIG_ERR; } } #if GNULIB_defined_SIGPIPE /* Raise the signal SIG. */ int rpl_raise (int sig) # undef raise { switch (sig) { case SIGPIPE: if (blocked_set & (1U << sig)) pending_array[sig] = 1; else { handler_t handler = SIGPIPE_handler; if (handler == SIG_DFL) exit (128 + SIGPIPE); else if (handler != SIG_IGN) (*handler) (sig); } return 0; default: /* System defined signal */ return raise (sig); } } #endif bfgminer-bfgminer-3.10.0/lib/stddef.in.h000066400000000000000000000053431226556647300177750ustar00rootroot00000000000000/* A substitute for POSIX 2008 , for platforms that have issues. Copyright (C) 2009-2011 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Written by Eric Blake. */ /* * POSIX 2008 for platforms that have issues. * */ #if __GNUC__ >= 3 @PRAGMA_SYSTEM_HEADER@ #endif @PRAGMA_COLUMNS@ #if defined __need_wchar_t || defined __need_size_t \ || defined __need_ptrdiff_t || defined __need_NULL \ || defined __need_wint_t /* Special invocation convention inside gcc header files. In particular, gcc provides a version of that blindly redefines NULL even when __need_wint_t was defined, even though wint_t is not normally provided by . Hence, we must remember if special invocation has ever been used to obtain wint_t, in which case we need to clean up NULL yet again. */ # if !(defined _@GUARD_PREFIX@_STDDEF_H && defined _GL_STDDEF_WINT_T) # ifdef __need_wint_t # undef _@GUARD_PREFIX@_STDDEF_H # define _GL_STDDEF_WINT_T # endif # @INCLUDE_NEXT@ @NEXT_STDDEF_H@ # endif #else /* Normal invocation convention. */ # ifndef _@GUARD_PREFIX@_STDDEF_H /* The include_next requires a split double-inclusion guard. */ # @INCLUDE_NEXT@ @NEXT_STDDEF_H@ # ifndef _@GUARD_PREFIX@_STDDEF_H # define _@GUARD_PREFIX@_STDDEF_H /* On NetBSD 5.0, the definition of NULL lacks proper parentheses. 
*/ #if @REPLACE_NULL@ # undef NULL # ifdef __cplusplus /* ISO C++ says that the macro NULL must expand to an integer constant expression, hence '((void *) 0)' is not allowed in C++. */ # if __GNUG__ >= 3 /* GNU C++ has a __null macro that behaves like an integer ('int' or 'long') but has the same size as a pointer. Use that, to avoid warnings. */ # define NULL __null # else # define NULL 0L # endif # else # define NULL ((void *) 0) # endif #endif /* Some platforms lack wchar_t. */ #if !@HAVE_WCHAR_T@ # define wchar_t int #endif # endif /* _@GUARD_PREFIX@_STDDEF_H */ # endif /* _@GUARD_PREFIX@_STDDEF_H */ #endif /* __need_XXX */ bfgminer-bfgminer-3.10.0/lib/stdint.in.h000066400000000000000000000431501226556647300200270ustar00rootroot00000000000000/* Copyright (C) 2001-2002, 2004-2011 Free Software Foundation, Inc. Written by Paul Eggert, Bruno Haible, Sam Steingold, Peter Burwood. This file is part of gnulib. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* * ISO C 99 for platforms that lack it. * */ #ifndef _@GUARD_PREFIX@_STDINT_H #if __GNUC__ >= 3 @PRAGMA_SYSTEM_HEADER@ #endif @PRAGMA_COLUMNS@ /* When including a system file that in turn includes , use the system , not our substitute. This avoids problems with (for example) VMS, whose includes . */ #define _GL_JUST_INCLUDE_SYSTEM_INTTYPES_H /* On Android (Bionic libc), includes this file before having defined 'time_t'. Therefore in this case avoid including other system header files; just include the system's . Ideally we should test __BIONIC__ here, but it is only defined after has been included; hence test __ANDROID__ instead. */ #if defined __ANDROID__ \ && defined _SYS_TYPES_H_ && !defined __need_size_t # @INCLUDE_NEXT@ @NEXT_STDINT_H@ #else /* Get those types that are already defined in other system include files, so that we can "#define int8_t signed char" below without worrying about a later system include file containing a "typedef signed char int8_t;" that will get messed up by our macro. Our macros should all be consistent with the system versions, except for the "fast" types and macros, which we recommend against using in public interfaces due to compiler differences. */ #if @HAVE_STDINT_H@ # if defined __sgi && ! defined __c99 /* Bypass IRIX's if in C89 mode, since it merely annoys users with "This header file is to be used only for c99 mode compilations" diagnostics. */ # define __STDINT_H__ # endif /* Other systems may have an incomplete or buggy . Include it before , since any "#include " in would reinclude us, skipping our contents because _@GUARD_PREFIX@_STDINT_H is defined. The include_next requires a split double-inclusion guard. */ # @INCLUDE_NEXT@ @NEXT_STDINT_H@ #endif #if ! defined _@GUARD_PREFIX@_STDINT_H && ! defined _GL_JUST_INCLUDE_SYSTEM_STDINT_H #define _@GUARD_PREFIX@_STDINT_H /* defines some of the stdint.h types as well, on glibc, IRIX 6.5, and OpenBSD 3.8 (via ). 
AIX 5.2 isn't needed and causes troubles. MacOS X 10.4.6 includes (which is us), but relies on the system definitions, so include after @NEXT_STDINT_H@. */ #if @HAVE_SYS_TYPES_H@ && ! defined _AIX # include #endif /* Get LONG_MIN, LONG_MAX, ULONG_MAX. */ #include #if @HAVE_INTTYPES_H@ /* In OpenBSD 3.8, includes , which defines int{8,16,32,64}_t, uint{8,16,32,64}_t and __BIT_TYPES_DEFINED__. also defines intptr_t and uintptr_t. */ # include #elif @HAVE_SYS_INTTYPES_H@ /* Solaris 7 has the types except the *_fast*_t types, and the macros except for *_FAST*_*, INTPTR_MIN, PTRDIFF_MIN, PTRDIFF_MAX. */ # include #endif #if @HAVE_SYS_BITYPES_H@ && ! defined __BIT_TYPES_DEFINED__ /* Linux libc4 >= 4.6.7 and libc5 have a that defines int{8,16,32,64}_t and __BIT_TYPES_DEFINED__. In libc5 >= 5.2.2 it is included by . */ # include #endif #undef _GL_JUST_INCLUDE_SYSTEM_INTTYPES_H /* Minimum and maximum values for an integer type under the usual assumption. Return an unspecified value if BITS == 0, adding a check to pacify picky compilers. */ #define _STDINT_MIN(signed, bits, zero) \ ((signed) ? (- ((zero) + 1) << ((bits) ? (bits) - 1 : 0)) : (zero)) #define _STDINT_MAX(signed, bits, zero) \ ((signed) \ ? ~ _STDINT_MIN (signed, bits, zero) \ : /* The expression for the unsigned case. The subtraction of (signed) \ is a nop in the unsigned case and avoids "signed integer overflow" \ warnings in the signed case. */ \ ((((zero) + 1) << ((bits) ? (bits) - 1 - (signed) : 0)) - 1) * 2 + 1) #if !GNULIB_defined_stdint_types /* 7.18.1.1. Exact-width integer types */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. */ #undef int8_t #undef uint8_t typedef signed char gl_int8_t; typedef unsigned char gl_uint8_t; #define int8_t gl_int8_t #define uint8_t gl_uint8_t #undef int16_t #undef uint16_t typedef short int gl_int16_t; typedef unsigned short int gl_uint16_t; #define int16_t gl_int16_t #define uint16_t gl_uint16_t #undef int32_t #undef uint32_t typedef int gl_int32_t; typedef unsigned int gl_uint32_t; #define int32_t gl_int32_t #define uint32_t gl_uint32_t /* If the system defines INT64_MAX, assume int64_t works. That way, if the underlying platform defines int64_t to be a 64-bit long long int, the code below won't mistakenly define it to be a 64-bit long int, which would mess up C++ name mangling. We must use #ifdef rather than #if, to avoid an error with HP-UX 10.20 cc. */ #ifdef INT64_MAX # define GL_INT64_T #else /* Do not undefine int64_t if gnulib is not being used with 64-bit types, since otherwise it breaks platforms like Tandem/NSK. */ # if LONG_MAX >> 31 >> 31 == 1 # undef int64_t typedef long int gl_int64_t; # define int64_t gl_int64_t # define GL_INT64_T # elif defined _MSC_VER # undef int64_t typedef __int64 gl_int64_t; # define int64_t gl_int64_t # define GL_INT64_T # elif @HAVE_LONG_LONG_INT@ # undef int64_t typedef long long int gl_int64_t; # define int64_t gl_int64_t # define GL_INT64_T # endif #endif #ifdef UINT64_MAX # define GL_UINT64_T #else # if ULONG_MAX >> 31 >> 31 >> 1 == 1 # undef uint64_t typedef unsigned long int gl_uint64_t; # define uint64_t gl_uint64_t # define GL_UINT64_T # elif defined _MSC_VER # undef uint64_t typedef unsigned __int64 gl_uint64_t; # define uint64_t gl_uint64_t # define GL_UINT64_T # elif @HAVE_UNSIGNED_LONG_LONG_INT@ # undef uint64_t typedef unsigned long long int gl_uint64_t; # define uint64_t gl_uint64_t # define GL_UINT64_T # endif #endif /* Avoid collision with Solaris 2.5.1 etc. 
*/ #define _UINT8_T #define _UINT32_T #define _UINT64_T /* 7.18.1.2. Minimum-width integer types */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. Therefore the leastN_t types are the same as the corresponding N_t types. */ #undef int_least8_t #undef uint_least8_t #undef int_least16_t #undef uint_least16_t #undef int_least32_t #undef uint_least32_t #undef int_least64_t #undef uint_least64_t #define int_least8_t int8_t #define uint_least8_t uint8_t #define int_least16_t int16_t #define uint_least16_t uint16_t #define int_least32_t int32_t #define uint_least32_t uint32_t #ifdef GL_INT64_T # define int_least64_t int64_t #endif #ifdef GL_UINT64_T # define uint_least64_t uint64_t #endif /* 7.18.1.3. Fastest minimum-width integer types */ /* Note: Other substitutes may define these types differently. It is not recommended to use these types in public header files. */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. Therefore the fastN_t types are taken from the same list of types. Assume that 'long int' is fast enough for all narrower integers. */ #undef int_fast8_t #undef uint_fast8_t #undef int_fast16_t #undef uint_fast16_t #undef int_fast32_t #undef uint_fast32_t #undef int_fast64_t #undef uint_fast64_t typedef long int gl_int_fast8_t; typedef unsigned long int gl_uint_fast8_t; typedef long int gl_int_fast16_t; typedef unsigned long int gl_uint_fast16_t; typedef long int gl_int_fast32_t; typedef unsigned long int gl_uint_fast32_t; #define int_fast8_t gl_int_fast8_t #define uint_fast8_t gl_uint_fast8_t #define int_fast16_t gl_int_fast16_t #define uint_fast16_t gl_uint_fast16_t #define int_fast32_t gl_int_fast32_t #define uint_fast32_t gl_uint_fast32_t #ifdef GL_INT64_T # define int_fast64_t int64_t #endif #ifdef GL_UINT64_T # define uint_fast64_t uint64_t #endif /* 7.18.1.4. Integer types capable of holding object pointers */ #undef intptr_t #undef uintptr_t typedef long int gl_intptr_t; typedef unsigned long int gl_uintptr_t; #define intptr_t gl_intptr_t #define uintptr_t gl_uintptr_t /* 7.18.1.5. Greatest-width integer types */ /* Note: These types are compiler dependent. It may be unwise to use them in public header files. */ #undef intmax_t #if @HAVE_LONG_LONG_INT@ && LONG_MAX >> 30 == 1 typedef long long int gl_intmax_t; # define intmax_t gl_intmax_t #elif defined GL_INT64_T # define intmax_t int64_t #else typedef long int gl_intmax_t; # define intmax_t gl_intmax_t #endif #undef uintmax_t #if @HAVE_UNSIGNED_LONG_LONG_INT@ && ULONG_MAX >> 31 == 1 typedef unsigned long long int gl_uintmax_t; # define uintmax_t gl_uintmax_t #elif defined GL_UINT64_T # define uintmax_t uint64_t #else typedef unsigned long int gl_uintmax_t; # define uintmax_t gl_uintmax_t #endif /* Verify that intmax_t and uintmax_t have the same size. Too much code breaks if this is not the case. If this check fails, the reason is likely to be found in the autoconf macros. */ typedef int _verify_intmax_size[sizeof (intmax_t) == sizeof (uintmax_t) ? 1 : -1]; #define GNULIB_defined_stdint_types 1 #endif /* !GNULIB_defined_stdint_types */ /* 7.18.2. Limits of specified-width integer types */ #if ! defined __cplusplus || defined __STDC_LIMIT_MACROS /* 7.18.2.1. Limits of exact-width integer types */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. 
*/ #undef INT8_MIN #undef INT8_MAX #undef UINT8_MAX #define INT8_MIN (~ INT8_MAX) #define INT8_MAX 127 #define UINT8_MAX 255 #undef INT16_MIN #undef INT16_MAX #undef UINT16_MAX #define INT16_MIN (~ INT16_MAX) #define INT16_MAX 32767 #define UINT16_MAX 65535 #undef INT32_MIN #undef INT32_MAX #undef UINT32_MAX #define INT32_MIN (~ INT32_MAX) #define INT32_MAX 2147483647 #define UINT32_MAX 4294967295U #if defined GL_INT64_T && ! defined INT64_MAX /* Prefer (- INTMAX_C (1) << 63) over (~ INT64_MAX) because SunPRO C 5.0 evaluates the latter incorrectly in preprocessor expressions. */ # define INT64_MIN (- INTMAX_C (1) << 63) # define INT64_MAX INTMAX_C (9223372036854775807) #endif #if defined GL_UINT64_T && ! defined UINT64_MAX # define UINT64_MAX UINTMAX_C (18446744073709551615) #endif /* 7.18.2.2. Limits of minimum-width integer types */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. Therefore the leastN_t types are the same as the corresponding N_t types. */ #undef INT_LEAST8_MIN #undef INT_LEAST8_MAX #undef UINT_LEAST8_MAX #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define UINT_LEAST8_MAX UINT8_MAX #undef INT_LEAST16_MIN #undef INT_LEAST16_MAX #undef UINT_LEAST16_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define UINT_LEAST16_MAX UINT16_MAX #undef INT_LEAST32_MIN #undef INT_LEAST32_MAX #undef UINT_LEAST32_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define UINT_LEAST32_MAX UINT32_MAX #undef INT_LEAST64_MIN #undef INT_LEAST64_MAX #ifdef GL_INT64_T # define INT_LEAST64_MIN INT64_MIN # define INT_LEAST64_MAX INT64_MAX #endif #undef UINT_LEAST64_MAX #ifdef GL_UINT64_T # define UINT_LEAST64_MAX UINT64_MAX #endif /* 7.18.2.3. Limits of fastest minimum-width integer types */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits. Therefore the fastN_t types are taken from the same list of types. */ #undef INT_FAST8_MIN #undef INT_FAST8_MAX #undef UINT_FAST8_MAX #define INT_FAST8_MIN LONG_MIN #define INT_FAST8_MAX LONG_MAX #define UINT_FAST8_MAX ULONG_MAX #undef INT_FAST16_MIN #undef INT_FAST16_MAX #undef UINT_FAST16_MAX #define INT_FAST16_MIN LONG_MIN #define INT_FAST16_MAX LONG_MAX #define UINT_FAST16_MAX ULONG_MAX #undef INT_FAST32_MIN #undef INT_FAST32_MAX #undef UINT_FAST32_MAX #define INT_FAST32_MIN LONG_MIN #define INT_FAST32_MAX LONG_MAX #define UINT_FAST32_MAX ULONG_MAX #undef INT_FAST64_MIN #undef INT_FAST64_MAX #ifdef GL_INT64_T # define INT_FAST64_MIN INT64_MIN # define INT_FAST64_MAX INT64_MAX #endif #undef UINT_FAST64_MAX #ifdef GL_UINT64_T # define UINT_FAST64_MAX UINT64_MAX #endif /* 7.18.2.4. Limits of integer types capable of holding object pointers */ #undef INTPTR_MIN #undef INTPTR_MAX #undef UINTPTR_MAX #define INTPTR_MIN LONG_MIN #define INTPTR_MAX LONG_MAX #define UINTPTR_MAX ULONG_MAX /* 7.18.2.5. Limits of greatest-width integer types */ #undef INTMAX_MIN #undef INTMAX_MAX #ifdef INT64_MAX # define INTMAX_MIN INT64_MIN # define INTMAX_MAX INT64_MAX #else # define INTMAX_MIN INT32_MIN # define INTMAX_MAX INT32_MAX #endif #undef UINTMAX_MAX #ifdef UINT64_MAX # define UINTMAX_MAX UINT64_MAX #else # define UINTMAX_MAX UINT32_MAX #endif /* 7.18.3. 
Limits of other integer types */ /* ptrdiff_t limits */ #undef PTRDIFF_MIN #undef PTRDIFF_MAX #if @APPLE_UNIVERSAL_BUILD@ # ifdef _LP64 # define PTRDIFF_MIN _STDINT_MIN (1, 64, 0l) # define PTRDIFF_MAX _STDINT_MAX (1, 64, 0l) # else # define PTRDIFF_MIN _STDINT_MIN (1, 32, 0) # define PTRDIFF_MAX _STDINT_MAX (1, 32, 0) # endif #else # define PTRDIFF_MIN \ _STDINT_MIN (1, @BITSIZEOF_PTRDIFF_T@, 0@PTRDIFF_T_SUFFIX@) # define PTRDIFF_MAX \ _STDINT_MAX (1, @BITSIZEOF_PTRDIFF_T@, 0@PTRDIFF_T_SUFFIX@) #endif /* sig_atomic_t limits */ #undef SIG_ATOMIC_MIN #undef SIG_ATOMIC_MAX #define SIG_ATOMIC_MIN \ _STDINT_MIN (@HAVE_SIGNED_SIG_ATOMIC_T@, @BITSIZEOF_SIG_ATOMIC_T@, \ 0@SIG_ATOMIC_T_SUFFIX@) #define SIG_ATOMIC_MAX \ _STDINT_MAX (@HAVE_SIGNED_SIG_ATOMIC_T@, @BITSIZEOF_SIG_ATOMIC_T@, \ 0@SIG_ATOMIC_T_SUFFIX@) /* size_t limit */ #undef SIZE_MAX #if @APPLE_UNIVERSAL_BUILD@ # ifdef _LP64 # define SIZE_MAX _STDINT_MAX (0, 64, 0ul) # else # define SIZE_MAX _STDINT_MAX (0, 32, 0ul) # endif #else # define SIZE_MAX _STDINT_MAX (0, @BITSIZEOF_SIZE_T@, 0@SIZE_T_SUFFIX@) #endif /* wchar_t limits */ /* Get WCHAR_MIN, WCHAR_MAX. This include is not on the top, above, because on OSF/1 4.0 we have a sequence of nested includes -> -> -> , and the latter includes and assumes its types are already defined. */ #if @HAVE_WCHAR_H@ && ! (defined WCHAR_MIN && defined WCHAR_MAX) /* BSD/OS 4.0.1 has a bug: , and must be included before . */ # include # include # include # define _GL_JUST_INCLUDE_SYSTEM_WCHAR_H # include # undef _GL_JUST_INCLUDE_SYSTEM_WCHAR_H #endif #undef WCHAR_MIN #undef WCHAR_MAX #define WCHAR_MIN \ _STDINT_MIN (@HAVE_SIGNED_WCHAR_T@, @BITSIZEOF_WCHAR_T@, 0@WCHAR_T_SUFFIX@) #define WCHAR_MAX \ _STDINT_MAX (@HAVE_SIGNED_WCHAR_T@, @BITSIZEOF_WCHAR_T@, 0@WCHAR_T_SUFFIX@) /* wint_t limits */ #undef WINT_MIN #undef WINT_MAX #define WINT_MIN \ _STDINT_MIN (@HAVE_SIGNED_WINT_T@, @BITSIZEOF_WINT_T@, 0@WINT_T_SUFFIX@) #define WINT_MAX \ _STDINT_MAX (@HAVE_SIGNED_WINT_T@, @BITSIZEOF_WINT_T@, 0@WINT_T_SUFFIX@) #endif /* !defined __cplusplus || defined __STDC_LIMIT_MACROS */ /* 7.18.4. Macros for integer constants */ #if ! defined __cplusplus || defined __STDC_CONSTANT_MACROS /* 7.18.4.1. Macros for minimum-width integer constants */ /* According to ISO C 99 Technical Corrigendum 1 */ /* Here we assume a standard architecture where the hardware integer types have 8, 16, 32, optionally 64 bits, and int is 32 bits. */ #undef INT8_C #undef UINT8_C #define INT8_C(x) x #define UINT8_C(x) x #undef INT16_C #undef UINT16_C #define INT16_C(x) x #define UINT16_C(x) x #undef INT32_C #undef UINT32_C #define INT32_C(x) x #define UINT32_C(x) x ## U #undef INT64_C #undef UINT64_C #if LONG_MAX >> 31 >> 31 == 1 # define INT64_C(x) x##L #elif defined _MSC_VER # define INT64_C(x) x##i64 #elif @HAVE_LONG_LONG_INT@ # define INT64_C(x) x##LL #endif #if ULONG_MAX >> 31 >> 31 >> 1 == 1 # define UINT64_C(x) x##UL #elif defined _MSC_VER # define UINT64_C(x) x##ui64 #elif @HAVE_UNSIGNED_LONG_LONG_INT@ # define UINT64_C(x) x##ULL #endif /* 7.18.4.2. 
Macros for greatest-width integer constants */ #undef INTMAX_C #if @HAVE_LONG_LONG_INT@ && LONG_MAX >> 30 == 1 # define INTMAX_C(x) x##LL #elif defined GL_INT64_T # define INTMAX_C(x) INT64_C(x) #else # define INTMAX_C(x) x##L #endif #undef UINTMAX_C #if @HAVE_UNSIGNED_LONG_LONG_INT@ && ULONG_MAX >> 31 == 1 # define UINTMAX_C(x) x##ULL #elif defined GL_UINT64_T # define UINTMAX_C(x) UINT64_C(x) #else # define UINTMAX_C(x) x##UL #endif #endif /* !defined __cplusplus || defined __STDC_CONSTANT_MACROS */ #endif /* _@GUARD_PREFIX@_STDINT_H */ #endif /* !(defined __ANDROID__ && ...) */ #endif /* !defined _@GUARD_PREFIX@_STDINT_H && !defined _GL_JUST_INCLUDE_SYSTEM_STDINT_H */ bfgminer-bfgminer-3.10.0/lib/str-two-way.h000066400000000000000000000422761226556647300203420ustar00rootroot00000000000000/* Byte-wise substring search, using the Two-Way algorithm. Copyright (C) 2008-2011 Free Software Foundation, Inc. This file is part of the GNU C Library. Written by Eric Blake , 2008. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ /* Before including this file, you need to include and , and define: RESULT_TYPE A macro that expands to the return type. AVAILABLE(h, h_l, j, n_l) A macro that returns nonzero if there are at least N_L bytes left starting at H[J]. H is 'unsigned char *', H_L, J, and N_L are 'size_t'; H_L is an lvalue. For NUL-terminated searches, H_L can be modified each iteration to avoid having to compute the end of H up front. For case-insensitivity, you may optionally define: CMP_FUNC(p1, p2, l) A macro that returns 0 iff the first L characters of P1 and P2 are equal. CANON_ELEMENT(c) A macro that canonicalizes an element right after it has been fetched from one of the two strings. The argument is an 'unsigned char'; the result must be an 'unsigned char' as well. This file undefines the macros documented above, and defines LONG_NEEDLE_THRESHOLD. */ #include #include /* We use the Two-Way string matching algorithm (also known as Chrochemore-Perrin), which guarantees linear complexity with constant space. Additionally, for long needles, we also use a bad character shift table similar to the Boyer-Moore algorithm to achieve improved (potentially sub-linear) performance. See http://www-igm.univ-mlv.fr/~lecroq/string/node26.html#SECTION00260, http://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm, http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.34.6641&rep=rep1&type=pdf */ /* Point at which computing a bad-byte shift table is likely to be worthwhile. Small needles should not compute a table, since it adds (1 << CHAR_BIT) + NEEDLE_LEN computations of preparation for a speedup no greater than a factor of NEEDLE_LEN. The larger the needle, the better the potential performance gain. On the other hand, on non-POSIX systems with CHAR_BIT larger than eight, the memory required for the table is prohibitive. 
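   As a concrete illustration, assuming the usual CHAR_BIT == 8: a
   32-byte needle pays (1 << 8) + 32 = 288 preparation steps for a best
   case speedup factor of 32 (the threshold chosen below), whereas a
   4-byte needle would pay 260 steps for at most a 4x gain and is better
   served by the short-needle code path.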
*/ #if CHAR_BIT < 10 # define LONG_NEEDLE_THRESHOLD 32U #else # define LONG_NEEDLE_THRESHOLD SIZE_MAX #endif #ifndef MAX # define MAX(a, b) ((a < b) ? (b) : (a)) #endif #ifndef CANON_ELEMENT # define CANON_ELEMENT(c) c #endif #ifndef CMP_FUNC # define CMP_FUNC memcmp #endif /* Perform a critical factorization of NEEDLE, of length NEEDLE_LEN. Return the index of the first byte in the right half, and set *PERIOD to the global period of the right half. The global period of a string is the smallest index (possibly its length) at which all remaining bytes in the string are repetitions of the prefix (the last repetition may be a subset of the prefix). When NEEDLE is factored into two halves, a local period is the length of the smallest word that shares a suffix with the left half and shares a prefix with the right half. All factorizations of a non-empty NEEDLE have a local period of at least 1 and no greater than NEEDLE_LEN. A critical factorization has the property that the local period equals the global period. All strings have at least one critical factorization with the left half smaller than the global period. And while some strings have more than one critical factorization, it is provable that with an ordered alphabet, at least one of the critical factorizations corresponds to a maximal suffix. Given an ordered alphabet, a critical factorization can be computed in linear time, with 2 * NEEDLE_LEN comparisons, by computing the shorter of two ordered maximal suffixes. The ordered maximal suffixes are determined by lexicographic comparison while tracking periodicity. */ static size_t critical_factorization (const unsigned char *needle, size_t needle_len, size_t *period) { /* Index of last byte of left half, or SIZE_MAX. */ size_t max_suffix, max_suffix_rev; size_t j; /* Index into NEEDLE for current candidate suffix. */ size_t k; /* Offset into current period. */ size_t p; /* Intermediate period. */ unsigned char a, b; /* Current comparison bytes. */ /* Special case NEEDLE_LEN of 1 or 2 (all callers already filtered out 0-length needles. */ if (needle_len < 3) { *period = 1; return needle_len - 1; } /* Invariants: 0 <= j < NEEDLE_LEN - 1 -1 <= max_suffix{,_rev} < j (treating SIZE_MAX as if it were signed) min(max_suffix, max_suffix_rev) < global period of NEEDLE 1 <= p <= global period of NEEDLE p == global period of the substring NEEDLE[max_suffix{,_rev}+1...j] 1 <= k <= p */ /* Perform lexicographic search. */ max_suffix = SIZE_MAX; j = 0; k = p = 1; while (j + k < needle_len) { a = CANON_ELEMENT (needle[j + k]); b = CANON_ELEMENT (needle[max_suffix + k]); if (a < b) { /* Suffix is smaller, period is entire prefix so far. */ j += k; k = 1; p = j - max_suffix; } else if (a == b) { /* Advance through repetition of the current period. */ if (k != p) ++k; else { j += p; k = 1; } } else /* b < a */ { /* Suffix is larger, start over from current location. */ max_suffix = j++; k = p = 1; } } *period = p; /* Perform reverse lexicographic search. */ max_suffix_rev = SIZE_MAX; j = 0; k = p = 1; while (j + k < needle_len) { a = CANON_ELEMENT (needle[j + k]); b = CANON_ELEMENT (needle[max_suffix_rev + k]); if (b < a) { /* Suffix is smaller, period is entire prefix so far. */ j += k; k = 1; p = j - max_suffix_rev; } else if (a == b) { /* Advance through repetition of the current period. */ if (k != p) ++k; else { j += p; k = 1; } } else /* a < b */ { /* Suffix is larger, start over from current location. */ max_suffix_rev = j++; k = p = 1; } } /* Choose the shorter suffix. 
Return the index of the first byte of the right half, rather than the last byte of the left half. For some examples, 'banana' has two critical factorizations, both exposed by the two lexicographic extreme suffixes of 'anana' and 'nana', where both suffixes have a period of 2. On the other hand, with 'aab' and 'bba', both strings have a single critical factorization of the last byte, with the suffix having a period of 1. While the maximal lexicographic suffix of 'aab' is 'b', the maximal lexicographic suffix of 'bba' is 'ba', which is not a critical factorization. Conversely, the maximal reverse lexicographic suffix of 'a' works for 'bba', but not 'ab' for 'aab'. The shorter suffix of the two will always be a critical factorization. */ if (max_suffix_rev + 1 < max_suffix + 1) return max_suffix + 1; *period = p; return max_suffix_rev + 1; } /* Return the first location of non-empty NEEDLE within HAYSTACK, or NULL. HAYSTACK_LEN is the minimum known length of HAYSTACK. This method is optimized for NEEDLE_LEN < LONG_NEEDLE_THRESHOLD. Performance is guaranteed to be linear, with an initialization cost of 2 * NEEDLE_LEN comparisons. If AVAILABLE does not modify HAYSTACK_LEN (as in memmem), then at most 2 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching. If AVAILABLE modifies HAYSTACK_LEN (as in strstr), then at most 3 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching. */ static RETURN_TYPE two_way_short_needle (const unsigned char *haystack, size_t haystack_len, const unsigned char *needle, size_t needle_len) { size_t i; /* Index into current byte of NEEDLE. */ size_t j; /* Index into current window of HAYSTACK. */ size_t period; /* The period of the right half of needle. */ size_t suffix; /* The index of the right half of needle. */ /* Factor the needle into two halves, such that the left half is smaller than the global period, and the right half is periodic (with a period as large as NEEDLE_LEN - suffix). */ suffix = critical_factorization (needle, needle_len, &period); /* Perform the search. Each iteration compares the right half first. */ if (CMP_FUNC (needle, needle + period, suffix) == 0) { /* Entire needle is periodic; a mismatch in the left half can only advance by the period, so use memory to avoid rescanning known occurrences of the period in the right half. */ size_t memory = 0; j = 0; while (AVAILABLE (haystack, haystack_len, j, needle_len)) { /* Scan for matches in right half. */ i = MAX (suffix, memory); while (i < needle_len && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) ++i; if (needle_len <= i) { /* Scan for matches in left half. */ i = suffix - 1; while (memory < i + 1 && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) --i; if (i + 1 < memory + 1) return (RETURN_TYPE) (haystack + j); /* No match, so remember how many repetitions of period on the right half were scanned. */ j += period; memory = needle_len - period; } else { j += i - suffix + 1; memory = 0; } } } else { /* The two halves of needle are distinct; no extra memory is required, and any mismatch results in a maximal shift. */ period = MAX (suffix, needle_len - suffix) + 1; j = 0; while (AVAILABLE (haystack, haystack_len, j, needle_len)) { /* Scan for matches in right half. */ i = suffix; while (i < needle_len && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) ++i; if (needle_len <= i) { /* Scan for matches in left half. 
*/ i = suffix - 1; while (i != SIZE_MAX && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) --i; if (i == SIZE_MAX) return (RETURN_TYPE) (haystack + j); j += period; } else j += i - suffix + 1; } } return NULL; } /* Return the first location of non-empty NEEDLE within HAYSTACK, or NULL. HAYSTACK_LEN is the minimum known length of HAYSTACK. This method is optimized for LONG_NEEDLE_THRESHOLD <= NEEDLE_LEN. Performance is guaranteed to be linear, with an initialization cost of 3 * NEEDLE_LEN + (1 << CHAR_BIT) operations. If AVAILABLE does not modify HAYSTACK_LEN (as in memmem), then at most 2 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching, and sublinear performance O(HAYSTACK_LEN / NEEDLE_LEN) is possible. If AVAILABLE modifies HAYSTACK_LEN (as in strstr), then at most 3 * HAYSTACK_LEN - NEEDLE_LEN comparisons occur in searching, and sublinear performance is not possible. */ static RETURN_TYPE two_way_long_needle (const unsigned char *haystack, size_t haystack_len, const unsigned char *needle, size_t needle_len) { size_t i; /* Index into current byte of NEEDLE. */ size_t j; /* Index into current window of HAYSTACK. */ size_t period; /* The period of the right half of needle. */ size_t suffix; /* The index of the right half of needle. */ size_t shift_table[1U << CHAR_BIT]; /* See below. */ /* Factor the needle into two halves, such that the left half is smaller than the global period, and the right half is periodic (with a period as large as NEEDLE_LEN - suffix). */ suffix = critical_factorization (needle, needle_len, &period); /* Populate shift_table. For each possible byte value c, shift_table[c] is the distance from the last occurrence of c to the end of NEEDLE, or NEEDLE_LEN if c is absent from the NEEDLE. shift_table[NEEDLE[NEEDLE_LEN - 1]] contains the only 0. */ for (i = 0; i < 1U << CHAR_BIT; i++) shift_table[i] = needle_len; for (i = 0; i < needle_len; i++) shift_table[CANON_ELEMENT (needle[i])] = needle_len - i - 1; /* Perform the search. Each iteration compares the right half first. */ if (CMP_FUNC (needle, needle + period, suffix) == 0) { /* Entire needle is periodic; a mismatch in the left half can only advance by the period, so use memory to avoid rescanning known occurrences of the period in the right half. */ size_t memory = 0; size_t shift; j = 0; while (AVAILABLE (haystack, haystack_len, j, needle_len)) { /* Check the last byte first; if it does not match, then shift to the next possible match location. */ shift = shift_table[CANON_ELEMENT (haystack[j + needle_len - 1])]; if (0 < shift) { if (memory && shift < period) { /* Since needle is periodic, but the last period has a byte out of place, there can be no match until after the mismatch. */ shift = needle_len - period; } memory = 0; j += shift; continue; } /* Scan for matches in right half. The last byte has already been matched, by virtue of the shift table. */ i = MAX (suffix, memory); while (i < needle_len - 1 && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) ++i; if (needle_len - 1 <= i) { /* Scan for matches in left half. */ i = suffix - 1; while (memory < i + 1 && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) --i; if (i + 1 < memory + 1) return (RETURN_TYPE) (haystack + j); /* No match, so remember how many repetitions of period on the right half were scanned. 
*/ j += period; memory = needle_len - period; } else { j += i - suffix + 1; memory = 0; } } } else { /* The two halves of needle are distinct; no extra memory is required, and any mismatch results in a maximal shift. */ size_t shift; period = MAX (suffix, needle_len - suffix) + 1; j = 0; while (AVAILABLE (haystack, haystack_len, j, needle_len)) { /* Check the last byte first; if it does not match, then shift to the next possible match location. */ shift = shift_table[CANON_ELEMENT (haystack[j + needle_len - 1])]; if (0 < shift) { j += shift; continue; } /* Scan for matches in right half. The last byte has already been matched, by virtue of the shift table. */ i = suffix; while (i < needle_len - 1 && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) ++i; if (needle_len - 1 <= i) { /* Scan for matches in left half. */ i = suffix - 1; while (i != SIZE_MAX && (CANON_ELEMENT (needle[i]) == CANON_ELEMENT (haystack[i + j]))) --i; if (i == SIZE_MAX) return (RETURN_TYPE) (haystack + j); j += period; } else j += i - suffix + 1; } } return NULL; } #undef AVAILABLE #undef CANON_ELEMENT #undef CMP_FUNC #undef MAX #undef RETURN_TYPE bfgminer-bfgminer-3.10.0/lib/string.in.h000066400000000000000000001137241226556647300200350ustar00rootroot00000000000000/* A GNU-like . Copyright (C) 1995-1996, 2001-2011 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef _@GUARD_PREFIX@_STRING_H #if __GNUC__ >= 3 @PRAGMA_SYSTEM_HEADER@ #endif @PRAGMA_COLUMNS@ /* The include_next requires a split double-inclusion guard. */ #@INCLUDE_NEXT@ @NEXT_STRING_H@ #ifndef _@GUARD_PREFIX@_STRING_H #define _@GUARD_PREFIX@_STRING_H /* NetBSD 5.0 mis-defines NULL. */ #include /* MirBSD defines mbslen as a macro. */ #if @GNULIB_MBSLEN@ && defined __MirBSD__ # include #endif /* The __attribute__ feature is available in gcc versions 2.5 and later. The attribute __pure__ was added in gcc 2.96. */ #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96) # define _GL_ATTRIBUTE_PURE __attribute__ ((__pure__)) #else # define _GL_ATTRIBUTE_PURE /* empty */ #endif /* NetBSD 5.0 declares strsignal in , not in . */ /* But in any case avoid namespace pollution on glibc systems. */ #if (@GNULIB_STRSIGNAL@ || defined GNULIB_POSIXCHECK) && defined __NetBSD__ \ && ! defined __GLIBC__ # include #endif /* The definitions of _GL_FUNCDECL_RPL etc. are copied here. */ /* The definition of _GL_ARG_NONNULL is copied here. */ /* The definition of _GL_WARN_ON_USE is copied here. */ /* Return the first instance of C within N bytes of S, or NULL. */ #if @GNULIB_MEMCHR@ # if @REPLACE_MEMCHR@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define memchr rpl_memchr # endif _GL_FUNCDECL_RPL (memchr, void *, (void const *__s, int __c, size_t __n) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (memchr, void *, (void const *__s, int __c, size_t __n)); # else # if ! 
@HAVE_MEMCHR@ _GL_FUNCDECL_SYS (memchr, void *, (void const *__s, int __c, size_t __n) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); # endif /* On some systems, this function is defined as an overloaded function: extern "C" { const void * std::memchr (const void *, int, size_t); } extern "C++" { void * std::memchr (void *, int, size_t); } */ _GL_CXXALIAS_SYS_CAST2 (memchr, void *, (void const *__s, int __c, size_t __n), void const *, (void const *__s, int __c, size_t __n)); # endif # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (memchr, void *, (void *__s, int __c, size_t __n)); _GL_CXXALIASWARN1 (memchr, void const *, (void const *__s, int __c, size_t __n)); # else _GL_CXXALIASWARN (memchr); # endif #elif defined GNULIB_POSIXCHECK # undef memchr /* Assume memchr is always declared. */ _GL_WARN_ON_USE (memchr, "memchr has platform-specific bugs - " "use gnulib module memchr for portability" ); #endif /* Return the first occurrence of NEEDLE in HAYSTACK. */ #if @GNULIB_MEMMEM@ # if @REPLACE_MEMMEM@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define memmem rpl_memmem # endif _GL_FUNCDECL_RPL (memmem, void *, (void const *__haystack, size_t __haystack_len, void const *__needle, size_t __needle_len) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 3))); _GL_CXXALIAS_RPL (memmem, void *, (void const *__haystack, size_t __haystack_len, void const *__needle, size_t __needle_len)); # else # if ! @HAVE_DECL_MEMMEM@ _GL_FUNCDECL_SYS (memmem, void *, (void const *__haystack, size_t __haystack_len, void const *__needle, size_t __needle_len) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 3))); # endif _GL_CXXALIAS_SYS (memmem, void *, (void const *__haystack, size_t __haystack_len, void const *__needle, size_t __needle_len)); # endif _GL_CXXALIASWARN (memmem); #elif defined GNULIB_POSIXCHECK # undef memmem # if HAVE_RAW_DECL_MEMMEM _GL_WARN_ON_USE (memmem, "memmem is unportable and often quadratic - " "use gnulib module memmem-simple for portability, " "and module memmem for speed" ); # endif #endif /* Copy N bytes of SRC to DEST, return pointer to bytes after the last written byte. */ #if @GNULIB_MEMPCPY@ # if ! @HAVE_MEMPCPY@ _GL_FUNCDECL_SYS (mempcpy, void *, (void *restrict __dest, void const *restrict __src, size_t __n) _GL_ARG_NONNULL ((1, 2))); # endif _GL_CXXALIAS_SYS (mempcpy, void *, (void *restrict __dest, void const *restrict __src, size_t __n)); _GL_CXXALIASWARN (mempcpy); #elif defined GNULIB_POSIXCHECK # undef mempcpy # if HAVE_RAW_DECL_MEMPCPY _GL_WARN_ON_USE (mempcpy, "mempcpy is unportable - " "use gnulib module mempcpy for portability"); # endif #endif /* Search backwards through a block for a byte (specified as an int). */ #if @GNULIB_MEMRCHR@ # if ! 
@HAVE_DECL_MEMRCHR@ _GL_FUNCDECL_SYS (memrchr, void *, (void const *, int, size_t) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); # endif /* On some systems, this function is defined as an overloaded function: extern "C++" { const void * std::memrchr (const void *, int, size_t); } extern "C++" { void * std::memrchr (void *, int, size_t); } */ _GL_CXXALIAS_SYS_CAST2 (memrchr, void *, (void const *, int, size_t), void const *, (void const *, int, size_t)); # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (memrchr, void *, (void *, int, size_t)); _GL_CXXALIASWARN1 (memrchr, void const *, (void const *, int, size_t)); # else _GL_CXXALIASWARN (memrchr); # endif #elif defined GNULIB_POSIXCHECK # undef memrchr # if HAVE_RAW_DECL_MEMRCHR _GL_WARN_ON_USE (memrchr, "memrchr is unportable - " "use gnulib module memrchr for portability"); # endif #endif /* Find the first occurrence of C in S. More efficient than memchr(S,C,N), at the expense of undefined behavior if C does not occur within N bytes. */ #if @GNULIB_RAWMEMCHR@ # if ! @HAVE_RAWMEMCHR@ _GL_FUNCDECL_SYS (rawmemchr, void *, (void const *__s, int __c_in) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); # endif /* On some systems, this function is defined as an overloaded function: extern "C++" { const void * std::rawmemchr (const void *, int); } extern "C++" { void * std::rawmemchr (void *, int); } */ _GL_CXXALIAS_SYS_CAST2 (rawmemchr, void *, (void const *__s, int __c_in), void const *, (void const *__s, int __c_in)); # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (rawmemchr, void *, (void *__s, int __c_in)); _GL_CXXALIASWARN1 (rawmemchr, void const *, (void const *__s, int __c_in)); # else _GL_CXXALIASWARN (rawmemchr); # endif #elif defined GNULIB_POSIXCHECK # undef rawmemchr # if HAVE_RAW_DECL_RAWMEMCHR _GL_WARN_ON_USE (rawmemchr, "rawmemchr is unportable - " "use gnulib module rawmemchr for portability"); # endif #endif /* Copy SRC to DST, returning the address of the terminating '\0' in DST. */ #if @GNULIB_STPCPY@ # if ! @HAVE_STPCPY@ _GL_FUNCDECL_SYS (stpcpy, char *, (char *restrict __dst, char const *restrict __src) _GL_ARG_NONNULL ((1, 2))); # endif _GL_CXXALIAS_SYS (stpcpy, char *, (char *restrict __dst, char const *restrict __src)); _GL_CXXALIASWARN (stpcpy); #elif defined GNULIB_POSIXCHECK # undef stpcpy # if HAVE_RAW_DECL_STPCPY _GL_WARN_ON_USE (stpcpy, "stpcpy is unportable - " "use gnulib module stpcpy for portability"); # endif #endif /* Copy no more than N bytes of SRC to DST, returning a pointer past the last non-NUL byte written into DST. */ #if @GNULIB_STPNCPY@ # if @REPLACE_STPNCPY@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef stpncpy # define stpncpy rpl_stpncpy # endif _GL_FUNCDECL_RPL (stpncpy, char *, (char *restrict __dst, char const *restrict __src, size_t __n) _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_RPL (stpncpy, char *, (char *restrict __dst, char const *restrict __src, size_t __n)); # else # if ! 
@HAVE_STPNCPY@ _GL_FUNCDECL_SYS (stpncpy, char *, (char *restrict __dst, char const *restrict __src, size_t __n) _GL_ARG_NONNULL ((1, 2))); # endif _GL_CXXALIAS_SYS (stpncpy, char *, (char *restrict __dst, char const *restrict __src, size_t __n)); # endif _GL_CXXALIASWARN (stpncpy); #elif defined GNULIB_POSIXCHECK # undef stpncpy # if HAVE_RAW_DECL_STPNCPY _GL_WARN_ON_USE (stpncpy, "stpncpy is unportable - " "use gnulib module stpncpy for portability"); # endif #endif #if defined GNULIB_POSIXCHECK /* strchr() does not work with multibyte strings if the locale encoding is GB18030 and the character to be searched is a digit. */ # undef strchr /* Assume strchr is always declared. */ _GL_WARN_ON_USE (strchr, "strchr cannot work correctly on character strings " "in some multibyte locales - " "use mbschr if you care about internationalization"); #endif /* Find the first occurrence of C in S or the final NUL byte. */ #if @GNULIB_STRCHRNUL@ # if @REPLACE_STRCHRNUL@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define strchrnul rpl_strchrnul # endif _GL_FUNCDECL_RPL (strchrnul, char *, (const char *__s, int __c_in) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (strchrnul, char *, (const char *str, int ch)); # else # if ! @HAVE_STRCHRNUL@ _GL_FUNCDECL_SYS (strchrnul, char *, (char const *__s, int __c_in) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); # endif /* On some systems, this function is defined as an overloaded function: extern "C++" { const char * std::strchrnul (const char *, int); } extern "C++" { char * std::strchrnul (char *, int); } */ _GL_CXXALIAS_SYS_CAST2 (strchrnul, char *, (char const *__s, int __c_in), char const *, (char const *__s, int __c_in)); # endif # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (strchrnul, char *, (char *__s, int __c_in)); _GL_CXXALIASWARN1 (strchrnul, char const *, (char const *__s, int __c_in)); # else _GL_CXXALIASWARN (strchrnul); # endif #elif defined GNULIB_POSIXCHECK # undef strchrnul # if HAVE_RAW_DECL_STRCHRNUL _GL_WARN_ON_USE (strchrnul, "strchrnul is unportable - " "use gnulib module strchrnul for portability"); # endif #endif /* Duplicate S, returning an identical malloc'd string. */ #if @GNULIB_STRDUP@ # if @REPLACE_STRDUP@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strdup # define strdup rpl_strdup # endif _GL_FUNCDECL_RPL (strdup, char *, (char const *__s) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (strdup, char *, (char const *__s)); # else # if defined __cplusplus && defined GNULIB_NAMESPACE && defined strdup /* strdup exists as a function and as a macro. Get rid of the macro. */ # undef strdup # endif # if !(@HAVE_DECL_STRDUP@ || defined strdup) _GL_FUNCDECL_SYS (strdup, char *, (char const *__s) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (strdup, char *, (char const *__s)); # endif _GL_CXXALIASWARN (strdup); #elif defined GNULIB_POSIXCHECK # undef strdup # if HAVE_RAW_DECL_STRDUP _GL_WARN_ON_USE (strdup, "strdup is unportable - " "use gnulib module strdup for portability"); # endif #endif /* Append no more than N characters from SRC onto DEST. 
*/ #if @GNULIB_STRNCAT@ # if @REPLACE_STRNCAT@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strncat # define strncat rpl_strncat # endif _GL_FUNCDECL_RPL (strncat, char *, (char *dest, const char *src, size_t n) _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_RPL (strncat, char *, (char *dest, const char *src, size_t n)); # else _GL_CXXALIAS_SYS (strncat, char *, (char *dest, const char *src, size_t n)); # endif _GL_CXXALIASWARN (strncat); #elif defined GNULIB_POSIXCHECK # undef strncat # if HAVE_RAW_DECL_STRNCAT _GL_WARN_ON_USE (strncat, "strncat is unportable - " "use gnulib module strncat for portability"); # endif #endif /* Return a newly allocated copy of at most N bytes of STRING. */ #if @GNULIB_STRNDUP@ # if @REPLACE_STRNDUP@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strndup # define strndup rpl_strndup # endif _GL_FUNCDECL_RPL (strndup, char *, (char const *__string, size_t __n) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (strndup, char *, (char const *__string, size_t __n)); # else # if ! @HAVE_DECL_STRNDUP@ _GL_FUNCDECL_SYS (strndup, char *, (char const *__string, size_t __n) _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (strndup, char *, (char const *__string, size_t __n)); # endif _GL_CXXALIASWARN (strndup); #elif defined GNULIB_POSIXCHECK # undef strndup # if HAVE_RAW_DECL_STRNDUP _GL_WARN_ON_USE (strndup, "strndup is unportable - " "use gnulib module strndup for portability"); # endif #endif /* Find the length (number of bytes) of STRING, but scan at most MAXLEN bytes. If no '\0' terminator is found in that many bytes, return MAXLEN. */ #if @GNULIB_STRNLEN@ # if @REPLACE_STRNLEN@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strnlen # define strnlen rpl_strnlen # endif _GL_FUNCDECL_RPL (strnlen, size_t, (char const *__string, size_t __maxlen) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (strnlen, size_t, (char const *__string, size_t __maxlen)); # else # if ! @HAVE_DECL_STRNLEN@ _GL_FUNCDECL_SYS (strnlen, size_t, (char const *__string, size_t __maxlen) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1))); # endif _GL_CXXALIAS_SYS (strnlen, size_t, (char const *__string, size_t __maxlen)); # endif _GL_CXXALIASWARN (strnlen); #elif defined GNULIB_POSIXCHECK # undef strnlen # if HAVE_RAW_DECL_STRNLEN _GL_WARN_ON_USE (strnlen, "strnlen is unportable - " "use gnulib module strnlen for portability"); # endif #endif #if defined GNULIB_POSIXCHECK /* strcspn() assumes the second argument is a list of single-byte characters. Even in this simple case, it does not work with multibyte strings if the locale encoding is GB18030 and one of the characters to be searched is a digit. */ # undef strcspn /* Assume strcspn is always declared. */ _GL_WARN_ON_USE (strcspn, "strcspn cannot work correctly on character strings " "in multibyte locales - " "use mbscspn if you care about internationalization"); #endif /* Find the first occurrence in S of any character in ACCEPT. */ #if @GNULIB_STRPBRK@ # if ! 
@HAVE_STRPBRK@ _GL_FUNCDECL_SYS (strpbrk, char *, (char const *__s, char const *__accept) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 2))); # endif /* On some systems, this function is defined as an overloaded function: extern "C" { const char * strpbrk (const char *, const char *); } extern "C++" { char * strpbrk (char *, const char *); } */ _GL_CXXALIAS_SYS_CAST2 (strpbrk, char *, (char const *__s, char const *__accept), const char *, (char const *__s, char const *__accept)); # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (strpbrk, char *, (char *__s, char const *__accept)); _GL_CXXALIASWARN1 (strpbrk, char const *, (char const *__s, char const *__accept)); # else _GL_CXXALIASWARN (strpbrk); # endif # if defined GNULIB_POSIXCHECK /* strpbrk() assumes the second argument is a list of single-byte characters. Even in this simple case, it does not work with multibyte strings if the locale encoding is GB18030 and one of the characters to be searched is a digit. */ # undef strpbrk _GL_WARN_ON_USE (strpbrk, "strpbrk cannot work correctly on character strings " "in multibyte locales - " "use mbspbrk if you care about internationalization"); # endif #elif defined GNULIB_POSIXCHECK # undef strpbrk # if HAVE_RAW_DECL_STRPBRK _GL_WARN_ON_USE (strpbrk, "strpbrk is unportable - " "use gnulib module strpbrk for portability"); # endif #endif #if defined GNULIB_POSIXCHECK /* strspn() assumes the second argument is a list of single-byte characters. Even in this simple case, it cannot work with multibyte strings. */ # undef strspn /* Assume strspn is always declared. */ _GL_WARN_ON_USE (strspn, "strspn cannot work correctly on character strings " "in multibyte locales - " "use mbsspn if you care about internationalization"); #endif #if defined GNULIB_POSIXCHECK /* strrchr() does not work with multibyte strings if the locale encoding is GB18030 and the character to be searched is a digit. */ # undef strrchr /* Assume strrchr is always declared. */ _GL_WARN_ON_USE (strrchr, "strrchr cannot work correctly on character strings " "in some multibyte locales - " "use mbsrchr if you care about internationalization"); #endif /* Search the next delimiter (char listed in DELIM) starting at *STRINGP. If one is found, overwrite it with a NUL, and advance *STRINGP to point to the next char after it. Otherwise, set *STRINGP to NULL. If *STRINGP was already NULL, nothing happens. Return the old value of *STRINGP. This is a variant of strtok() that is multithread-safe and supports empty fields. Caveat: It modifies the original string. Caveat: These functions cannot be used on constant strings. Caveat: The identity of the delimiting character is lost. Caveat: It doesn't work with multibyte strings unless all of the delimiter characters are ASCII characters < 0x30. See also strtok_r(). */ #if @GNULIB_STRSEP@ # if ! 
@HAVE_STRSEP@ _GL_FUNCDECL_SYS (strsep, char *, (char **restrict __stringp, char const *restrict __delim) _GL_ARG_NONNULL ((1, 2))); # endif _GL_CXXALIAS_SYS (strsep, char *, (char **restrict __stringp, char const *restrict __delim)); _GL_CXXALIASWARN (strsep); # if defined GNULIB_POSIXCHECK # undef strsep _GL_WARN_ON_USE (strsep, "strsep cannot work correctly on character strings " "in multibyte locales - " "use mbssep if you care about internationalization"); # endif #elif defined GNULIB_POSIXCHECK # undef strsep # if HAVE_RAW_DECL_STRSEP _GL_WARN_ON_USE (strsep, "strsep is unportable - " "use gnulib module strsep for portability"); # endif #endif #if @GNULIB_STRSTR@ # if @REPLACE_STRSTR@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define strstr rpl_strstr # endif _GL_FUNCDECL_RPL (strstr, char *, (const char *haystack, const char *needle) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_RPL (strstr, char *, (const char *haystack, const char *needle)); # else /* On some systems, this function is defined as an overloaded function: extern "C++" { const char * strstr (const char *, const char *); } extern "C++" { char * strstr (char *, const char *); } */ _GL_CXXALIAS_SYS_CAST2 (strstr, char *, (const char *haystack, const char *needle), const char *, (const char *haystack, const char *needle)); # endif # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (strstr, char *, (char *haystack, const char *needle)); _GL_CXXALIASWARN1 (strstr, const char *, (const char *haystack, const char *needle)); # else _GL_CXXALIASWARN (strstr); # endif #elif defined GNULIB_POSIXCHECK /* strstr() does not work with multibyte strings if the locale encoding is different from UTF-8: POSIX says that it operates on "strings", and "string" in POSIX is defined as a sequence of bytes, not of characters. */ # undef strstr /* Assume strstr is always declared. */ _GL_WARN_ON_USE (strstr, "strstr is quadratic on many systems, and cannot " "work correctly on character strings in most " "multibyte locales - " "use mbsstr if you care about internationalization, " "or use strstr if you care about speed"); #endif /* Find the first occurrence of NEEDLE in HAYSTACK, using case-insensitive comparison. */ #if @GNULIB_STRCASESTR@ # if @REPLACE_STRCASESTR@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define strcasestr rpl_strcasestr # endif _GL_FUNCDECL_RPL (strcasestr, char *, (const char *haystack, const char *needle) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_RPL (strcasestr, char *, (const char *haystack, const char *needle)); # else # if ! 
@HAVE_STRCASESTR@ _GL_FUNCDECL_SYS (strcasestr, char *, (const char *haystack, const char *needle) _GL_ATTRIBUTE_PURE _GL_ARG_NONNULL ((1, 2))); # endif /* On some systems, this function is defined as an overloaded function: extern "C++" { const char * strcasestr (const char *, const char *); } extern "C++" { char * strcasestr (char *, const char *); } */ _GL_CXXALIAS_SYS_CAST2 (strcasestr, char *, (const char *haystack, const char *needle), const char *, (const char *haystack, const char *needle)); # endif # if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 10) && !defined __UCLIBC__) \ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) _GL_CXXALIASWARN1 (strcasestr, char *, (char *haystack, const char *needle)); _GL_CXXALIASWARN1 (strcasestr, const char *, (const char *haystack, const char *needle)); # else _GL_CXXALIASWARN (strcasestr); # endif #elif defined GNULIB_POSIXCHECK /* strcasestr() does not work with multibyte strings: It is a glibc extension, and glibc implements it only for unibyte locales. */ # undef strcasestr # if HAVE_RAW_DECL_STRCASESTR _GL_WARN_ON_USE (strcasestr, "strcasestr does work correctly on character " "strings in multibyte locales - " "use mbscasestr if you care about " "internationalization, or use c-strcasestr if you want " "a locale independent function"); # endif #endif /* Parse S into tokens separated by characters in DELIM. If S is NULL, the saved pointer in SAVE_PTR is used as the next starting point. For example: char s[] = "-abc-=-def"; char *sp; x = strtok_r(s, "-", &sp); // x = "abc", sp = "=-def" x = strtok_r(NULL, "-=", &sp); // x = "def", sp = NULL x = strtok_r(NULL, "=", &sp); // x = NULL // s = "abc\0-def\0" This is a variant of strtok() that is multithread-safe. For the POSIX documentation for this function, see: http://www.opengroup.org/susv3xsh/strtok.html Caveat: It modifies the original string. Caveat: These functions cannot be used on constant strings. Caveat: The identity of the delimiting character is lost. Caveat: It doesn't work with multibyte strings unless all of the delimiter characters are ASCII characters < 0x30. See also strsep(). */ #if (!defined(strtok_r)) || @REPLACE_STRTOK_R@ #if @GNULIB_STRTOK_R@ # if @REPLACE_STRTOK_R@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strtok_r # define strtok_r rpl_strtok_r # endif _GL_FUNCDECL_RPL (strtok_r, char *, (char *restrict s, char const *restrict delim, char **restrict save_ptr) _GL_ARG_NONNULL ((2, 3))); _GL_CXXALIAS_RPL (strtok_r, char *, (char *restrict s, char const *restrict delim, char **restrict save_ptr)); # else # if @UNDEFINE_STRTOK_R@ || defined GNULIB_POSIXCHECK # undef strtok_r # endif # if ! @HAVE_DECL_STRTOK_R@ _GL_FUNCDECL_SYS (strtok_r, char *, (char *restrict s, char const *restrict delim, char **restrict save_ptr) _GL_ARG_NONNULL ((2, 3))); # endif _GL_CXXALIAS_SYS (strtok_r, char *, (char *restrict s, char const *restrict delim, char **restrict save_ptr)); # endif _GL_CXXALIASWARN (strtok_r); # if defined GNULIB_POSIXCHECK _GL_WARN_ON_USE (strtok_r, "strtok_r cannot work correctly on character " "strings in multibyte locales - " "use mbstok_r if you care about internationalization"); # endif #elif defined GNULIB_POSIXCHECK # undef strtok_r # if HAVE_RAW_DECL_STRTOK_R _GL_WARN_ON_USE (strtok_r, "strtok_r is unportable - " "use gnulib module strtok_r for portability"); # endif #endif #endif /* The following functions are not specified by POSIX. They are gnulib extensions. 
*/ #if @GNULIB_MBSLEN@ /* Return the number of multibyte characters in the character string STRING. This considers multibyte characters, unlike strlen, which counts bytes. */ # ifdef __MirBSD__ /* MirBSD defines mbslen as a macro. Override it. */ # undef mbslen # endif # if @HAVE_MBSLEN@ /* AIX, OSF/1, MirBSD define mbslen already in libc. */ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define mbslen rpl_mbslen # endif _GL_FUNCDECL_RPL (mbslen, size_t, (const char *string) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (mbslen, size_t, (const char *string)); # else _GL_FUNCDECL_SYS (mbslen, size_t, (const char *string) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_SYS (mbslen, size_t, (const char *string)); # endif _GL_CXXALIASWARN (mbslen); #endif #if @GNULIB_MBSNLEN@ /* Return the number of multibyte characters in the character string starting at STRING and ending at STRING + LEN. */ _GL_EXTERN_C size_t mbsnlen (const char *string, size_t len) _GL_ARG_NONNULL ((1)); #endif #if @GNULIB_MBSCHR@ /* Locate the first single-byte character C in the character string STRING, and return a pointer to it. Return NULL if C is not found in STRING. Unlike strchr(), this function works correctly in multibyte locales with encodings such as GB18030. */ # if defined __hpux # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define mbschr rpl_mbschr /* avoid collision with HP-UX function */ # endif _GL_FUNCDECL_RPL (mbschr, char *, (const char *string, int c) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (mbschr, char *, (const char *string, int c)); # else _GL_FUNCDECL_SYS (mbschr, char *, (const char *string, int c) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_SYS (mbschr, char *, (const char *string, int c)); # endif _GL_CXXALIASWARN (mbschr); #endif #if @GNULIB_MBSRCHR@ /* Locate the last single-byte character C in the character string STRING, and return a pointer to it. Return NULL if C is not found in STRING. Unlike strrchr(), this function works correctly in multibyte locales with encodings such as GB18030. */ # if defined __hpux || defined __INTERIX # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define mbsrchr rpl_mbsrchr /* avoid collision with system function */ # endif _GL_FUNCDECL_RPL (mbsrchr, char *, (const char *string, int c) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_RPL (mbsrchr, char *, (const char *string, int c)); # else _GL_FUNCDECL_SYS (mbsrchr, char *, (const char *string, int c) _GL_ARG_NONNULL ((1))); _GL_CXXALIAS_SYS (mbsrchr, char *, (const char *string, int c)); # endif _GL_CXXALIASWARN (mbsrchr); #endif #if @GNULIB_MBSSTR@ /* Find the first occurrence of the character string NEEDLE in the character string HAYSTACK. Return NULL if NEEDLE is not found in HAYSTACK. Unlike strstr(), this function works correctly in multibyte locales with encodings different from UTF-8. */ _GL_EXTERN_C char * mbsstr (const char *haystack, const char *needle) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSCASECMP@ /* Compare the character strings S1 and S2, ignoring case, returning less than, equal to or greater than zero if S1 is lexicographically less than, equal to or greater than S2. Note: This function may, in multibyte locales, return 0 for strings of different lengths! Unlike strcasecmp(), this function works correctly in multibyte locales. 
*/ _GL_EXTERN_C int mbscasecmp (const char *s1, const char *s2) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSNCASECMP@ /* Compare the initial segment of the character string S1 consisting of at most N characters with the initial segment of the character string S2 consisting of at most N characters, ignoring case, returning less than, equal to or greater than zero if the initial segment of S1 is lexicographically less than, equal to or greater than the initial segment of S2. Note: This function may, in multibyte locales, return 0 for initial segments of different lengths! Unlike strncasecmp(), this function works correctly in multibyte locales. But beware that N is not a byte count but a character count! */ _GL_EXTERN_C int mbsncasecmp (const char *s1, const char *s2, size_t n) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSPCASECMP@ /* Compare the initial segment of the character string STRING consisting of at most mbslen (PREFIX) characters with the character string PREFIX, ignoring case. If the two match, return a pointer to the first byte after this prefix in STRING. Otherwise, return NULL. Note: This function may, in multibyte locales, return non-NULL if STRING is of smaller length than PREFIX! Unlike strncasecmp(), this function works correctly in multibyte locales. */ _GL_EXTERN_C char * mbspcasecmp (const char *string, const char *prefix) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSCASESTR@ /* Find the first occurrence of the character string NEEDLE in the character string HAYSTACK, using case-insensitive comparison. Note: This function may, in multibyte locales, return success even if strlen (haystack) < strlen (needle) ! Unlike strcasestr(), this function works correctly in multibyte locales. */ _GL_EXTERN_C char * mbscasestr (const char *haystack, const char *needle) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSCSPN@ /* Find the first occurrence in the character string STRING of any character in the character string ACCEPT. Return the number of bytes from the beginning of the string to this occurrence, or to the end of the string if none exists. Unlike strcspn(), this function works correctly in multibyte locales. */ _GL_EXTERN_C size_t mbscspn (const char *string, const char *accept) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSPBRK@ /* Find the first occurrence in the character string STRING of any character in the character string ACCEPT. Return the pointer to it, or NULL if none exists. Unlike strpbrk(), this function works correctly in multibyte locales. */ # if defined __hpux # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define mbspbrk rpl_mbspbrk /* avoid collision with HP-UX function */ # endif _GL_FUNCDECL_RPL (mbspbrk, char *, (const char *string, const char *accept) _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_RPL (mbspbrk, char *, (const char *string, const char *accept)); # else _GL_FUNCDECL_SYS (mbspbrk, char *, (const char *string, const char *accept) _GL_ARG_NONNULL ((1, 2))); _GL_CXXALIAS_SYS (mbspbrk, char *, (const char *string, const char *accept)); # endif _GL_CXXALIASWARN (mbspbrk); #endif #if @GNULIB_MBSSPN@ /* Find the first occurrence in the character string STRING of any character not in the character string REJECT. Return the number of bytes from the beginning of the string to this occurrence, or to the end of the string if none exists. Unlike strspn(), this function works correctly in multibyte locales. 
*/ _GL_EXTERN_C size_t mbsspn (const char *string, const char *reject) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSSEP@ /* Search the next delimiter (multibyte character listed in the character string DELIM) starting at the character string *STRINGP. If one is found, overwrite it with a NUL, and advance *STRINGP to point to the next multibyte character after it. Otherwise, set *STRINGP to NULL. If *STRINGP was already NULL, nothing happens. Return the old value of *STRINGP. This is a variant of mbstok_r() that supports empty fields. Caveat: It modifies the original string. Caveat: These functions cannot be used on constant strings. Caveat: The identity of the delimiting character is lost. See also mbstok_r(). */ _GL_EXTERN_C char * mbssep (char **stringp, const char *delim) _GL_ARG_NONNULL ((1, 2)); #endif #if @GNULIB_MBSTOK_R@ /* Parse the character string STRING into tokens separated by characters in the character string DELIM. If STRING is NULL, the saved pointer in SAVE_PTR is used as the next starting point. For example: char s[] = "-abc-=-def"; char *sp; x = mbstok_r(s, "-", &sp); // x = "abc", sp = "=-def" x = mbstok_r(NULL, "-=", &sp); // x = "def", sp = NULL x = mbstok_r(NULL, "=", &sp); // x = NULL // s = "abc\0-def\0" Caveat: It modifies the original string. Caveat: These functions cannot be used on constant strings. Caveat: The identity of the delimiting character is lost. See also mbssep(). */ _GL_EXTERN_C char * mbstok_r (char *string, const char *delim, char **save_ptr) _GL_ARG_NONNULL ((2, 3)); #endif /* Map any int, typically from errno, into an error message. */ #if @GNULIB_STRERROR@ # if @REPLACE_STRERROR@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strerror # define strerror rpl_strerror # endif _GL_FUNCDECL_RPL (strerror, char *, (int)); _GL_CXXALIAS_RPL (strerror, char *, (int)); # else _GL_CXXALIAS_SYS (strerror, char *, (int)); # endif _GL_CXXALIASWARN (strerror); #elif defined GNULIB_POSIXCHECK # undef strerror /* Assume strerror is always declared. */ _GL_WARN_ON_USE (strerror, "strerror is unportable - " "use gnulib module strerror to guarantee non-NULL result"); #endif /* Map any int, typically from errno, into an error message. Multithread-safe. Uses the POSIX declaration, not the glibc declaration. */ #if @GNULIB_STRERROR_R@ # if @REPLACE_STRERROR_R@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # undef strerror_r # define strerror_r rpl_strerror_r # endif _GL_FUNCDECL_RPL (strerror_r, int, (int errnum, char *buf, size_t buflen) _GL_ARG_NONNULL ((2))); _GL_CXXALIAS_RPL (strerror_r, int, (int errnum, char *buf, size_t buflen)); # else # if !@HAVE_DECL_STRERROR_R@ _GL_FUNCDECL_SYS (strerror_r, int, (int errnum, char *buf, size_t buflen) _GL_ARG_NONNULL ((2))); # endif _GL_CXXALIAS_SYS (strerror_r, int, (int errnum, char *buf, size_t buflen)); # endif # if @HAVE_DECL_STRERROR_R@ _GL_CXXALIASWARN (strerror_r); # endif #elif defined GNULIB_POSIXCHECK # undef strerror_r # if HAVE_RAW_DECL_STRERROR_R _GL_WARN_ON_USE (strerror_r, "strerror_r is unportable - " "use gnulib module strerror_r-posix for portability"); # endif #endif #if @GNULIB_STRSIGNAL@ # if @REPLACE_STRSIGNAL@ # if !(defined __cplusplus && defined GNULIB_NAMESPACE) # define strsignal rpl_strsignal # endif _GL_FUNCDECL_RPL (strsignal, char *, (int __sig)); _GL_CXXALIAS_RPL (strsignal, char *, (int __sig)); # else # if ! 
@HAVE_DECL_STRSIGNAL@ _GL_FUNCDECL_SYS (strsignal, char *, (int __sig)); # endif /* Need to cast, because on Cygwin 1.5.x systems, the return type is 'const char *'. */ _GL_CXXALIAS_SYS_CAST (strsignal, char *, (int __sig)); # endif _GL_CXXALIASWARN (strsignal); #elif defined GNULIB_POSIXCHECK # undef strsignal # if HAVE_RAW_DECL_STRSIGNAL _GL_WARN_ON_USE (strsignal, "strsignal is unportable - " "use gnulib module strsignal for portability"); # endif #endif #if @GNULIB_STRVERSCMP@ # if !@HAVE_STRVERSCMP@ _GL_FUNCDECL_SYS (strverscmp, int, (const char *, const char *) _GL_ARG_NONNULL ((1, 2))); # endif _GL_CXXALIAS_SYS (strverscmp, int, (const char *, const char *)); _GL_CXXALIASWARN (strverscmp); #elif defined GNULIB_POSIXCHECK # undef strverscmp # if HAVE_RAW_DECL_STRVERSCMP _GL_WARN_ON_USE (strverscmp, "strverscmp is unportable - " "use gnulib module strverscmp for portability"); # endif #endif #endif /* _@GUARD_PREFIX@_STRING_H */ #endif /* _@GUARD_PREFIX@_STRING_H */ bfgminer-bfgminer-3.10.0/lib/strtok_r.c000066400000000000000000000041141226556647300177540ustar00rootroot00000000000000/* Reentrant string tokenizer. Generic version. Copyright (C) 1991, 1996-1999, 2001, 2004, 2007, 2009-2011 Free Software Foundation, Inc. This file is part of the GNU C Library. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ #ifdef HAVE_CONFIG_H # include #endif #include #ifdef _LIBC # undef strtok_r # undef __strtok_r #else # define __strtok_r strtok_r # define __rawmemchr strchr #endif /* Parse S into tokens separated by characters in DELIM. If S is NULL, the saved pointer in SAVE_PTR is used as the next starting point. For example: char s[] = "-abc-=-def"; char *sp; x = strtok_r(s, "-", &sp); // x = "abc", sp = "=-def" x = strtok_r(NULL, "-=", &sp); // x = "def", sp = NULL x = strtok_r(NULL, "=", &sp); // x = NULL // s = "abc\0-def\0" */ char * __strtok_r (char *s, const char *delim, char **save_ptr) { char *token; if (s == NULL) s = *save_ptr; /* Scan leading delimiters. */ s += strspn (s, delim); if (*s == '\0') { *save_ptr = s; return NULL; } /* Find the end of the token. */ token = s; s = strpbrk (token, delim); if (s == NULL) /* This token finishes the string. */ *save_ptr = __rawmemchr (token, '\0'); else { /* Terminate the token and make *SAVE_PTR point past it. 
*/ *s = '\0'; *save_ptr = s + 1; } return token; } #ifdef weak_alias libc_hidden_def (__strtok_r) weak_alias (__strtok_r, strtok_r) #endif bfgminer-bfgminer-3.10.0/libbitfury.c000066400000000000000000000311511226556647300175130ustar00rootroot00000000000000/* * Copyright 2013 bitfury * Copyright 2013 Anatoly Legkodymov * Copyright 2013 Luke Dashjr * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config.h" #include #include #include #include #include #include "logging.h" #include "miner.h" #include "libbitfury.h" #include "spidevc.h" #include "sha2.h" #include #define BITFURY_REFRESH_DELAY 100 #define BITFURY_DETECT_TRIES 3000 / BITFURY_REFRESH_DELAY unsigned bitfury_decnonce(unsigned in); /* Configuration registers - control oscillators and such stuff. PROGRAMMED when magic number is matches, UNPROGRAMMED (default) otherwise */ static void bitfury_config_reg(struct spi_port *port, int cfgreg, int ena) { static const uint8_t enaconf[4] = { 0xc1, 0x6a, 0x59, 0xe3 }; static const uint8_t disconf[4] = { 0, 0, 0, 0 }; if (ena) spi_emit_data(port, 0x7000+cfgreg*32, enaconf, 4); else spi_emit_data(port, 0x7000+cfgreg*32, disconf, 4); } #define FIRST_BASE 61 #define SECOND_BASE 4 static const int8_t bitfury_counters[16] = { 64, 64, SECOND_BASE, SECOND_BASE+4, SECOND_BASE+2, SECOND_BASE+2+16, SECOND_BASE, SECOND_BASE+1, (FIRST_BASE)%65, (FIRST_BASE+1)%65, (FIRST_BASE+3)%65, (FIRST_BASE+3+16)%65, (FIRST_BASE+4)%65, (FIRST_BASE+4+4)%65, (FIRST_BASE+3+3)%65, (FIRST_BASE+3+1+3)%65}; /* Oscillator setup variants (maybe more), values inside of chip ANDed to not allow by programming errors work it at higher speeds */ /* WARNING! no chip temperature control limits, etc. It may self-fry and make fried chips with great ease :-) So if trying to overclock */ /* Do not place chip near flammable objects, provide adequate power protection and better wear eye protection ! */ /* Thermal runaway in this case could produce nice flames of chippy fries */ // Thermometer code from left to right - more ones ==> faster clock! 
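/* Illustrative sketch (assuming the encoding used by bitfury_set_freq()
 * further down in this file): the on-die slow oscillator is programmed with a
 * 64-bit "thermometer" value - the low `bits` bits set to one - written to
 * register 0x6000, and more ones select a faster clock.  For example:
 *
 *     uint64_t osc6_thermometer(int bits)   // hypothetical helper for clarity
 *     {
 *         return (1ULL << bits) - 1ULL;     // `bits` low-order ones, e.g.
 *     }                                     // bits == 52 -> 0x000fffffffffffff
 */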
#define rotrFixed(x,y) (((x) >> (y)) | ((x) << (32-(y)))) #define s0(x) (rotrFixed(x,7)^rotrFixed(x,18)^(x>>3)) #define s1(x) (rotrFixed(x,17)^rotrFixed(x,19)^(x>>10)) #define Ch(x,y,z) (z^(x&(y^z))) #define Maj(x,y,z) (y^((x^y)&(y^z))) #define S0(x) (rotrFixed(x,2)^rotrFixed(x,13)^rotrFixed(x,22)) #define S1(x) (rotrFixed(x,6)^rotrFixed(x,11)^rotrFixed(x,25)) /* SHA256 CONSTANTS */ static const unsigned SHA_K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; static void libbitfury_ms3_compute(unsigned *p) { unsigned a,b,c,d,e,f,g,h, ne, na, i; a = p[0]; b = p[1]; c = p[2]; d = p[3]; e = p[4]; f = p[5]; g = p[6]; h = p[7]; for (i = 0; i < 3; i++) { ne = p[i+16] + SHA_K[i] + h + Ch(e,f,g) + S1(e) + d; na = p[i+16] + SHA_K[i] + h + Ch(e,f,g) + S1(e) + S0(a) + Maj(a,b,c); d = c; c = b; b = a; a = na; h = g; g = f; f = e; e = ne; } p[15] = a; p[14] = b; p[13] = c; p[12] = d; p[11] = e; p[10] = f; p[9] = g; p[8] = h; } static void bitfury_send_conf(struct spi_port *port) { int i; for (i = 7; i <= 11; ++i) bitfury_config_reg(port, i, 0); bitfury_config_reg(port, 6, 0); /* disable OUTSLK */ bitfury_config_reg(port, 4, 1); /* Enable slow oscillator */ for (i = 1; i <= 3; ++i) bitfury_config_reg(port, i, 0); spi_emit_data(port, 0x0100, bitfury_counters, 16); /* Program counters correctly for rounds processing, here baby should start consuming power */ } static void bitfury_send_init(struct spi_port *port) { /* Prepare internal buffers */ /* PREPARE BUFFERS (INITIAL PROGRAMMING) */ unsigned w[16]; unsigned atrvec[] = { 0xb0e72d8e, 0x1dc5b862, 0xe9e7c4a6, 0x3050f1f5, 0x8a1a6b7e, 0x7ec384e8, 0x42c1c3fc, 0x8ed158a1, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x8a0bb7b7, 0x33af304f, 0x0b290c1a, 0xf0c4e61f, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ }; libbitfury_ms3_compute(&atrvec[0]); memset(&w, 0, sizeof(w)); w[3] = 0xffffffff; w[4] = 0x80000000; w[15] = 0x00000280; spi_emit_data(port, 0x1000, w, 16*4); spi_emit_data(port, 0x1400, w, 8*4); memset(w, 0, sizeof(w)); w[0] = 0x80000000; w[7] = 0x100; spi_emit_data(port, 0x1900, &w[0],8*4); /* Prepare MS and W buffers! 
*/ spi_emit_data(port, 0x3000, &atrvec[0], 19*4); } static void bitfury_set_freq(struct spi_port *port, int bits) { uint64_t freq; const uint8_t * osc6 = (unsigned char *)&freq; freq = (1ULL << bits) - 1ULL; spi_emit_data(port, 0x6000, osc6, 8); /* Program internal on-die slow oscillator frequency */ bitfury_config_reg(port, 4, 1); /* Enable slow oscillator */ } void bitfury_send_reinit(struct spi_port *port, int slot, int chip_n, int n) { spi_clear_buf(port); spi_emit_break(port); spi_emit_fasync(port, chip_n); bitfury_set_freq(port, n); bitfury_send_conf(port); bitfury_send_init(port); spi_txrx(port); } void bitfury_send_shutdown(struct spi_port *port, int slot, int chip_n) { spi_clear_buf(port); spi_emit_break(port); spi_emit_fasync(port, chip_n); bitfury_config_reg(port, 4, 0); /* Disable slow oscillator */ spi_txrx(port); } void bitfury_send_freq(struct spi_port *port, int slot, int chip_n, int bits) { spi_clear_buf(port); spi_emit_break(port); spi_emit_fasync(port, chip_n); bitfury_set_freq(port, bits); spi_txrx(port); } static unsigned int libbitfury_c_diff(unsigned ocounter, unsigned counter) { return counter > ocounter ? counter - ocounter : (0x003FFFFF - ocounter) + counter; } static int libbitfury_get_counter(unsigned int *newbuf, unsigned int *oldbuf) { int j; for(j = 0; j < 16; j++) { if (newbuf[j] != oldbuf[j]) { unsigned counter = bitfury_decnonce(newbuf[j]); if ((counter & 0xFFC00000) == 0xdf800000) { counter -= 0xdf800000; return counter; } } } return 0; } static int libbitfury_detect_chip(struct spi_port *port, int chip_n) { /* Test vectors to calculate (using address-translated loads) */ unsigned atrvec[] = { 0xb0e72d8e, 0x1dc5b862, 0xe9e7c4a6, 0x3050f1f5, 0x8a1a6b7e, 0x7ec384e8, 0x42c1c3fc, 0x8ed158a1, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x8a0bb7b7, 0x33af304f, 0x0b290c1a, 0xf0c4e61f, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ 0x9c4dfdc0, 0xf055c9e1, 0xe60f079d, 0xeeada6da, 0xd459883d, 0xd8049a9d, 0xd49f9a96, 0x15972fed, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x048b2528, 0x7acb2d4f, 0x0b290c1a, 0xbe00084a, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ 0x0317b3ea, 0x1d227d06, 0x3cca281e, 0xa6d0b9da, 0x1a359fe2, 0xa7287e27, 0x8b79c296, 0xc4d88274, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x328bcd4f, 0x75462d4f, 0x0b290c1a, 0x002c6dbc, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ 0xac4e38b6, 0xba0e3b3b, 0x649ad6f8, 0xf72e4c02, 0x93be06fb, 0x366d1126, 0xf4aae554, 0x4ff19c5b, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x72698140, 0x3bd62b4f, 0x3fd40c1a, 0x801e43e9, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ 0x9dbf91c9, 0x12e5066c, 0xf4184b87, 0x8060bc4d, 0x18f9c115, 0xf589d551, 0x0f7f18ae, 0x885aca59, /* MIDSTATE */ 0,0,0,0,0,0,0,0, 0x6f3806c3, 0x41f82a4f, 0x3fd40c1a, 0x00334b39, /* WDATA: hashMerleRoot[7], nTime, nBits, nNonce */ }; int i; unsigned newbuf[17], oldbuf[17]; unsigned ocounter; int odiff = 0; memset(newbuf, 0, 17 * 4); memset(oldbuf, 0, 17 * 4); libbitfury_ms3_compute(&atrvec[0]); libbitfury_ms3_compute(&atrvec[20]); libbitfury_ms3_compute(&atrvec[40]); spi_clear_buf(port); spi_emit_break(port); /* First we want to break chain! 
Otherwise we'll get all of traffic bounced to output */ spi_emit_fasync(port, chip_n); bitfury_set_freq(port, 52); //54 - 3F, 53 - 1F bitfury_send_conf(port); bitfury_send_init(port); spi_txrx(port); ocounter = 0; for (i = 0; i < BITFURY_DETECT_TRIES; i++) { int counter; spi_clear_buf(port); spi_emit_break(port); spi_emit_fasync(port, chip_n); spi_emit_data(port, 0x3000, &atrvec[0], 19*4); spi_txrx(port); memcpy(newbuf, spi_getrxbuf(port) + 4 + chip_n, 17*4); counter = libbitfury_get_counter(newbuf, oldbuf); if (ocounter) { unsigned int cdiff = libbitfury_c_diff(ocounter, counter); if (cdiff > 5000 && cdiff < 100000 && odiff > 5000 && odiff < 100000) return 1; odiff = cdiff; } ocounter = counter; if (newbuf[16] != 0 && newbuf[16] != 0xFFFFFFFF) { return 0; } cgsleep_ms(BITFURY_REFRESH_DELAY / 10); memcpy(oldbuf, newbuf, 17 * 4); } return 0; } int libbitfury_detectChips1(struct spi_port *port) { int n; for (n = 0; libbitfury_detect_chip(port, n); ++n) {} return n; } // in = 1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 f e d c b a 9 8 7 6 5 4 3 2 1 0 unsigned bitfury_decnonce(unsigned in) { unsigned out; /* First part load */ out = (in & 0xFF) << 24; in >>= 8; /* Byte reversal */ in = (((in & 0xaaaaaaaa) >> 1) | ((in & 0x55555555) << 1)); in = (((in & 0xcccccccc) >> 2) | ((in & 0x33333333) << 2)); in = (((in & 0xf0f0f0f0) >> 4) | ((in & 0x0f0f0f0f) << 4)); out |= (in >> 2)&0x3FFFFF; /* Extraction */ if (in & 1) out |= (1 << 23); if (in & 2) out |= (1 << 22); // out = 7 6 5 4 3 2 1 0 f e 18 19 1a 1b 1c 1d 1e 1f 10 11 12 13 14 15 16 17 8 9 a b c d out -= 0x800004; return out; } static int libbitfury_rehash(const void *midstate, const uint32_t m7, const uint32_t ntime, const uint32_t nbits, uint32_t nnonce) { unsigned char in[16]; unsigned int *in32 = (unsigned int *)in; unsigned int *mid32 = (unsigned int *)midstate; unsigned out32[8]; unsigned char *out = (unsigned char *) out32; #ifdef BITFURY_REHASH_DEBUG static unsigned history[512]; static unsigned history_p; #endif sha256_ctx ctx; memset( &ctx, 0, sizeof( sha256_ctx ) ); memcpy(ctx.h, mid32, 8*4); ctx.tot_len = 64; ctx.len = 0; nnonce = bswap_32(nnonce); in32[0] = bswap_32(m7); in32[1] = bswap_32(ntime); in32[2] = bswap_32(nbits); in32[3] = nnonce; sha256_update(&ctx, in, 16); sha256_final(&ctx, out); sha256(out, 32, out); if (out32[7] == 0) { #ifdef BITFURY_REHASH_DEBUG char hex[65]; bin2hex(hex, out, 32); applog(LOG_INFO, "! 
MS0: %08x, m7: %08x, ntime: %08x, nbits: %08x, nnonce: %08x", mid32[0], m7, ntime, nbits, nnonce); applog(LOG_INFO, " out: %s", hex); history[history_p] = nnonce; history_p++; history_p &= 512 - 1; #endif return 1; } return 0; } bool bitfury_fudge_nonce(const void *midstate, const uint32_t m7, const uint32_t ntime, const uint32_t nbits, uint32_t *nonce_p) { static const uint32_t offsets[] = {0, 0xffc00000, 0xff800000, 0x02800000, 0x02C00000, 0x00400000}; uint32_t nonce; int i; for (i = 0; i < 6; ++i) { nonce = *nonce_p + offsets[i]; if (libbitfury_rehash(midstate, m7, ntime, nbits, nonce)) { *nonce_p = nonce; return true; } } return false; } void work_to_bitfury_payload(struct bitfury_payload *p, struct work *w) { unsigned char flipped_data[80]; memset(p, 0, sizeof(struct bitfury_payload)); swap32yes(flipped_data, w->data, 80 / 4); memcpy(p->midstate, w->midstate, 32); p->m7 = bswap_32(*(unsigned *)(flipped_data + 64)); p->ntime = bswap_32(*(unsigned *)(flipped_data + 68)); p->nbits = bswap_32(*(unsigned *)(flipped_data + 72)); } void bitfury_payload_to_atrvec(uint32_t *atrvec, struct bitfury_payload *p) { /* Programming next value */ memcpy(atrvec, p, 20*4); libbitfury_ms3_compute(atrvec); } bfgminer-bfgminer-3.10.0/libbitfury.h000066400000000000000000000031231226556647300175160ustar00rootroot00000000000000#ifndef __LIBBITFURY_H__ #define __LIBBITFURY_H__ #include #include #include "miner.h" #include "spidevc.h" struct work; #define BITFURY_STAT_N 1024 struct bitfury_payload { unsigned char midstate[32]; unsigned int junk[8]; unsigned m7; unsigned ntime; unsigned nbits; unsigned nnonce; }; struct freq_stat { double *mh; double *s; int osc6_min; int osc6_max; double omh; double os; int best_osc; int best_done; }; struct bitfury_device { struct spi_port *spi; unsigned char osc6_bits; unsigned newbuf[17]; unsigned oldbuf[17]; bool oldjob; int active; uint32_t atrvec[20]; struct bitfury_payload payload; struct freq_stat chip_stat; struct timeval timer1; struct timeval tv_stat; unsigned int counter1, counter2; double mhz; int mhz_last; int mhz_best; unsigned slot; unsigned fasync; unsigned strange_counter; bool force_reinit; int desync_counter; int sample_hwe; int sample_tot; }; extern void work_to_bitfury_payload(struct bitfury_payload *, struct work *); extern void bitfury_payload_to_atrvec(uint32_t *atrvec, struct bitfury_payload *); extern void bitfury_send_reinit(struct spi_port *, int slot, int chip_n, int n); extern void bitfury_send_shutdown(struct spi_port *, int slot, int chip_n); extern void bitfury_send_freq(struct spi_port *, int slot, int chip_n, int bits); extern int libbitfury_detectChips1(struct spi_port *); extern unsigned bitfury_decnonce(unsigned); extern bool bitfury_fudge_nonce(const void *midstate, const uint32_t m7, const uint32_t ntime, const uint32_t nbits, uint32_t *nonce_p); #endif /* __LIBBITFURY_H__ */ bfgminer-bfgminer-3.10.0/libblkmaker/000077500000000000000000000000001226556647300174525ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/libztex.c000066400000000000000000000567731226556647300170420ustar00rootroot00000000000000/* * Copyright 2012 nelisky * Copyright 2012-2013 Luke Dashjr * Copyright 2012-2013 Denis Ahrens~ * Copyright 2012 Peter Stuge~ * * This work is based upon the Java SDK provided by ztex which is * Copyright (C) 2009-2011 ZTEX GmbH. 
* http://www.ztex.de * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. * * The copyright status of some of this code is currently a bit confused. They * were initially released under a license (GPLv2 only) incompatible with the * rest of the program at the time (GPLv3 or newer), and I haven't had luck * getting in touch with some later contributors (denoted above with a tilde) to * clarify it. Since their modifications would have been a license violation, * I'm assuming it was just an innocent mistake on their part. */ #define _GNU_SOURCE #include "config.h" #include #include #include #include #include #include "binloader.h" #include "compat.h" #include "deviceapi.h" #include "dynclock.h" #include "miner.h" #include "libztex.h" #include "util.h" //* Capability index for EEPROM support. #define CAPABILITY_EEPROM 0,0 //* Capability index for FPGA configuration support. #define CAPABILITY_FPGA 0,1 //* Capability index for FLASH memory support. #define CAPABILITY_FLASH 0,2 //* Capability index for DEBUG helper support. #define CAPABILITY_DEBUG 0,3 //* Capability index for AVR XMEGA support. #define CAPABILITY_XMEGA 0,4 //* Capability index for AVR XMEGA support. #define CAPABILITY_HS_FPGA 0,5 //* Capability index for AVR XMEGA support. #define CAPABILITY_MAC_EEPROM 0,6 //* Capability index for multi FPGA support. #define CAPABILITY_MULTI_FPGA 0,7 static int libztex_get_string_descriptor_ascii(libusb_device_handle *dev, uint8_t desc_index, unsigned char *data, int length) { int i, cnt; uint16_t langid; unsigned char buf[260]; /* We open code string descriptor retrieval and ASCII decoding here * in order to work around that libusb_get_string_descriptor_ascii() * in the FreeBSD libusb implementation hits a bug in ZTEX firmware, * where the device returns more bytes than requested, causing babble, * which makes FreeBSD return an error to us. * * Avoid the mess by doing it manually the same way as libusb-1.0. */ cnt = libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN, LIBUSB_REQUEST_GET_DESCRIPTOR, (LIBUSB_DT_STRING << 8) | 0, 0x0000, buf, sizeof(buf), 1000); if (cnt < 0) { applog(LOG_ERR, "%s: Failed to read LANGIDs: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); return cnt; } langid = libusb_le16_to_cpu(((uint16_t *)buf)[1]); cnt = libusb_control_transfer(dev, LIBUSB_ENDPOINT_IN, LIBUSB_REQUEST_GET_DESCRIPTOR, (LIBUSB_DT_STRING << 8) | desc_index, langid, buf, sizeof(buf), 1000); if (cnt < 0) { applog(LOG_ERR, "%s: Failed to read string descriptor: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); return cnt; } /* num chars = (all bytes except bLength and bDescriptorType) / 2 */ for (i = 0; i <= (cnt - 2) / 2 && i < length-1; i++) data[i] = buf[2 + i*2]; data[i] = 0; return LIBUSB_SUCCESS; } static bool libztex_firmwareReset(struct libusb_device_handle *hndl, bool enable) { uint8_t reset = enable ? 
1 : 0; int cnt = libusb_control_transfer(hndl, 0x40, 0xA0, 0xE600, 0, &reset, 1, 1000); if (cnt < 0) { applog(LOG_ERR, "Ztex reset %d failed: %s", enable, bfg_strerror(cnt, BST_LIBUSB)); return 1; } return 0; } enum ztex_check_result libztex_checkDevice(struct libusb_device *dev) { libusb_device_handle *hndl = NULL; struct libusb_device_descriptor desc; int ret = CHECK_ERROR, err, cnt; unsigned char buf[64]; unsigned int i; bytes_t bsdata = BYTES_INIT; err = libusb_get_device_descriptor(dev, &desc); if (unlikely(err != 0)) { applog(LOG_ERR, "Ztex check device: Failed to open read descriptor with error %d", err); return CHECK_ERROR; } if (desc.idVendor != LIBZTEX_IDVENDOR || desc.idProduct != LIBZTEX_IDPRODUCT) { applog(LOG_DEBUG, "Not a ZTEX device %04x:%04x", desc.idVendor, desc.idProduct); return CHECK_IS_NOT_ZTEX; } err = libusb_open(dev, &hndl); if (err != LIBUSB_SUCCESS) { applog(LOG_ERR, "%s: Can not open ZTEX device: %s", __func__, bfg_strerror(err, BST_LIBUSB)); goto done; } if (libusb_claim_interface(hndl, 0) == LIBUSB_ERROR_BUSY) { applog(LOG_DEBUG, "Ztex check device: Interface already busy, skipping"); goto done; } cnt = libusb_control_transfer(hndl, 0xc0, 0x22, 0, 0, buf, 40, 500); if (unlikely(cnt < 0)) { applog(LOG_ERR, "Ztex check device: Failed to read ztex descriptor with err %d", cnt); goto done; } if (buf[0] != 40 || buf[1] != 1 || buf[2] != 'Z' || buf[3] != 'T' || buf[4] != 'E' || buf[5] != 'X') { applog(LOG_ERR, "Ztex check device: Error reading ztex descriptor"); goto done; } if (buf[6] != 10) { ret = CHECK_IS_NOT_ZTEX; goto done; } // 15 = 1.15y 13 = 1.15d or 1.15x switch(buf[7]) { case 13: applog(LOG_ERR, "Found ztex board 1.15d or 1.15x"); break; case 15: applog(LOG_ERR, "Found ztex board 1.15y"); break; default: applog(LOG_ERR, "Found unknown ztex board"); ret = CHECK_IS_NOT_ZTEX; goto done; } // testing for dummy firmware if (buf[8] != 0) { ret = CHECK_OK; goto done; } applog(LOG_ERR, "Found dummy firmware, trying to send mining firmware"); char productString[32]; cnt = libztex_get_string_descriptor_ascii(hndl, desc.iProduct, (unsigned char*)productString, sizeof(productString)); if (unlikely(cnt < 0)) { applog(LOG_ERR, "Ztex check device: Failed to read device productString with err %d", cnt); return cnt; } applog(LOG_ERR, "productString: %s", productString); unsigned char productID2 = buf[7]; char *firmware = NULL; if (strcmp("USB-FPGA Module 1.15d (default)", productString) == 0 && productID2 == 13) { firmware = "ztex_ufm1_15d4"; } else if (strcmp("USB-FPGA Module 1.15x (default)", productString) == 0 && productID2 == 13) { firmware = "ztex_ufm1_15d4"; } else if (strcmp("USB-FPGA Module 1.15y (default)", productString) == 0 && productID2 == 15) { firmware = "ztex_ufm1_15y1"; } if (firmware == NULL) { applog(LOG_ERR, "could not figure out which firmware to use"); goto done; } applog(LOG_ERR, "Mining firmware filename: %s", firmware); bytes_init(&bsdata); if (!load_bitstream_bytes(&bsdata, "ztex", "ZTX *", firmware)) goto done; // in buf[] is still the identifier of the dummy firmware // use it to compare it with the new firmware char *rv = memmem(bytes_buf(&bsdata), bytes_len(&bsdata), buf, 8); if (rv == NULL) { applog(LOG_ERR, "%s: found firmware is not ZTEX", __func__); goto done; } // check for dummy firmware if (rv[8] == 0) { applog(LOG_ERR, "%s: found a ZTEX dummy firmware", __func__); goto done; } if (libztex_firmwareReset(hndl, true)) goto done; for (i = 0; i < bytes_len(&bsdata); i+= 256) { // firmware wants data in small chunks like 256 bytes int 
numbytes = (bytes_len(&bsdata) - i) < 256 ? (bytes_len(&bsdata) - i) : 256; int k = libusb_control_transfer(hndl, 0x40, 0xA0, i, 0, bytes_buf(&bsdata) + i, numbytes, 1000); if (k < numbytes) { applog(LOG_ERR, "Ztex device: Failed to write firmware at %d with: %s", i, bfg_strerror(k, BST_LIBUSB)); goto done; } } if (libztex_firmwareReset(hndl, false)) goto done; applog(LOG_ERR, "Ztex device: succesfully wrote firmware"); ret = CHECK_RESCAN; done: bytes_free(&bsdata); if (hndl) { libusb_release_interface(hndl, 0); libusb_close(hndl); } return ret; } static bool libztex_checkCapability(struct libztex_device *ztex, int i, int j) { if (!((i >= 0) && (i <= 5) && (j >= 0) && (j < 8) && (((ztex->interfaceCapabilities[i] & 255) & (1 << j)) != 0))) { applog(LOG_ERR, "%s: capability missing: %d %d", ztex->repr, i, j); return false; } return true; } static char libztex_detectBitstreamBitOrder(const unsigned char *buf, int size) { int i; for (i = 0; i < size - 4; i++) { if (((buf[i] & 255) == 0xaa) && ((buf[i + 1] & 255) == 0x99) && ((buf[i + 2] & 255) == 0x55) && ((buf[i + 3] & 255) == 0x66)) return 1; if (((buf[i] & 255) == 0x55) && ((buf[i + 1] & 255) == 0x99) && ((buf[i + 2] & 255) == 0xaa) && ((buf[i + 3] & 255) == 0x66)) return 0; } applog(LOG_WARNING, "Unable to determine bitstream bit order: no signature found"); return 0; } static void libztex_swapBits(unsigned char *buf, int size) { unsigned char c; int i; for (i = 0; i < size; i++) { c = buf[i]; buf[i] = ((c & 128) >> 7) | ((c & 64) >> 5) | ((c & 32) >> 3) | ((c & 16) >> 1) | ((c & 8) << 1) | ((c & 4) << 3) | ((c & 2) << 5) | ((c & 1) << 7); } } static int libztex_getFpgaState(struct libztex_device *ztex, struct libztex_fpgastate *state) { unsigned char buf[9]; int cnt; if (!libztex_checkCapability(ztex, CAPABILITY_FPGA)) return -1; cnt = libusb_control_transfer(ztex->hndl, 0xc0, 0x30, 0, 0, buf, 9, 1000); if (unlikely(cnt < 0)) { applog(LOG_ERR, "%s: Failed getFpgaState with err %d", ztex->repr, cnt); return cnt; } state->fpgaConfigured = (buf[0] == 0); state->fpgaChecksum = buf[1] & 0xff; state->fpgaBytes = ((buf[5] & 0xff) << 24) | ((buf[4] & 0xff) << 16) | ((buf[3] & 0xff) << 8) | (buf[2] & 0xff); state->fpgaInitB = buf[6] & 0xff; state->fpgaFlashResult = buf[7]; state->fpgaFlashBitSwap = (buf[8] != 0); return 0; } static int libztex_configureFpgaHS(struct libztex_device *ztex, const char* firmware, bool force, char bs, const char *repr) { struct libztex_fpgastate state; const int transactionBytes = 65536; unsigned char buf[transactionBytes], settings[2]; int tries, cnt, err; FILE *fp; if (!libztex_checkCapability(ztex, CAPABILITY_HS_FPGA)) return -1; libztex_getFpgaState(ztex, &state); if (!force && state.fpgaConfigured) { applog(LOG_INFO, "Bitstream already configured"); return 0; } cnt = libusb_control_transfer(ztex->hndl, 0xc0, 0x33, 0, 0, settings, 2, 1000); if (unlikely(cnt < 0)) { applog(LOG_ERR, "%s: Failed getHSFpgaSettings with err %d", ztex->repr, cnt); return cnt; } err = libusb_claim_interface(ztex->hndl, settings[1]); if (err != LIBUSB_SUCCESS) { applog(LOG_ERR, "%s: failed to claim interface for hs transfer", ztex->repr); return -4; } for (tries = 3; tries > 0; tries--) { fp = open_bitstream("ztex", firmware); if (!fp) { applog(LOG_ERR, "%"PRIpreprv": failed to read bitstream '%s'", repr, firmware); libusb_release_interface(ztex->hndl, settings[1]); return -2; } libusb_control_transfer(ztex->hndl, 0x40, 0x34, 0, 0, NULL, 0, 1000); // 0x34 - initHSFPGAConfiguration do { int length = fread(buf,1,transactionBytes,fp); if (bs 
!= 0 && bs != 1) bs = libztex_detectBitstreamBitOrder(buf, length); if (bs == 1) libztex_swapBits(buf, length); err = libusb_bulk_transfer(ztex->hndl, settings[0], buf, length, &cnt, 1000); if (cnt != length) applog(LOG_ERR, "%s: cnt != length", ztex->repr); if (err != 0) applog(LOG_ERR, "%s: Failed send hs fpga data", ztex->repr); } while (!feof(fp)); // While 1.15y can finish immediately, at least 1.15x needs some delay // (200ms might be enough, but 500ms is safer) if (ztex->productId[1] != 15) usleep(500); libusb_control_transfer(ztex->hndl, 0x40, 0x35, 0, 0, NULL, 0, 1000); // 0x35 - finishHSFPGAConfiguration if (cnt >= 0) tries = 0; fclose(fp); libztex_getFpgaState(ztex, &state); if (!state.fpgaConfigured) { applog(LOG_ERR, "%"PRIpreprv": HS FPGA configuration failed: DONE pin does not go high", repr); libusb_release_interface(ztex->hndl, settings[1]); return -3; } } libusb_release_interface(ztex->hndl, settings[1]); cgsleep_ms(200); applog(LOG_INFO, "%"PRIpreprv": HS FPGA configuration done", repr); return 0; } static int libztex_configureFpgaLS(struct libztex_device *ztex, const char* firmware, bool force, char bs, const char *repr) { struct libztex_fpgastate state; const int transactionBytes = 2048; unsigned char buf[transactionBytes]; int tries, cnt; FILE *fp; if (!libztex_checkCapability(ztex, CAPABILITY_FPGA)) return -1; libztex_getFpgaState(ztex, &state); if (!force && state.fpgaConfigured) { applog(LOG_DEBUG, "Bitstream already configured"); return 0; } for (tries = 10; tries > 0; tries--) { fp = open_bitstream("ztex", firmware); if (!fp) { _bitstream_not_found(repr, firmware); return -2; } //* Reset fpga cnt = libztex_resetFpga(ztex); if (unlikely(cnt < 0)) { applog(LOG_ERR, "%s: Failed reset fpga with err %d", ztex->repr, cnt); continue; } do { int length = fread(buf, 1, transactionBytes, fp); if (bs != 0 && bs != 1) bs = libztex_detectBitstreamBitOrder(buf, length); if (bs == 1) libztex_swapBits(buf, length); cnt = libusb_control_transfer(ztex->hndl, 0x40, 0x32, 0, 0, buf, length, 5000); if (cnt != length) { applog(LOG_ERR, "%s: Failed send ls fpga data", ztex->repr); break; } } while (!feof(fp)); if (cnt > 0) tries = 0; fclose(fp); } libztex_getFpgaState(ztex, &state); if (!state.fpgaConfigured) { applog(LOG_ERR, "%"PRIpreprv": LS FPGA configuration failed: DONE pin does not go high", repr); return -3; } cgsleep_ms(200); applog(LOG_INFO, "%"PRIpreprv": FPGA configuration done", repr); return 0; } int libztex_configureFpga(struct libztex_device *ztex, const char *repr) { char buf[256]; int rv; strcpy(buf, ztex->bitFileName); strcat(buf, ".bit"); rv = libztex_configureFpgaHS(ztex, buf, true, 2, repr); if (rv != 0) rv = libztex_configureFpgaLS(ztex, buf, true, 2, repr); if (!rv) if (libusb_claim_interface(ztex->hndl, 0) == LIBUSB_ERROR_BUSY) rv = -5; return rv; } int libztex_numberOfFpgas(struct libztex_device *ztex) { int cnt; unsigned char buf[3]; if (ztex->numberOfFpgas < 0) { if (libztex_checkCapability(ztex, CAPABILITY_MULTI_FPGA)) { cnt = libusb_control_transfer(ztex->hndl, 0xc0, 0x50, 0, 0, buf, 3, 1000); if (unlikely(cnt < 0)) { applog(LOG_ERR, "%s: Failed getMultiFpgaInfo with err %d", ztex->repr, cnt); return cnt; } ztex->numberOfFpgas = buf[0] + 1; ztex->selectedFpga = -1;//buf[1]; ztex->parallelConfigSupport = (buf[2] == 1); } else { ztex->numberOfFpgas = 1; ztex->selectedFpga = -1;//0; ztex->parallelConfigSupport = false; } } return ztex->numberOfFpgas; } int libztex_selectFpga(struct libztex_device *ztex, int16_t number) { int cnt, fpgacnt = 
libztex_numberOfFpgas(ztex->root); if (number < 0 || number >= fpgacnt) { applog(LOG_WARNING, "%s: Trying to select wrong fpga (%d in %d)", ztex->repr, number, fpgacnt); return 1; } if (ztex->root->selectedFpga != number && libztex_checkCapability(ztex->root, CAPABILITY_MULTI_FPGA)) { cnt = libusb_control_transfer(ztex->root->hndl, 0x40, 0x51, (uint16_t)number, 0, NULL, 0, 500); if (unlikely(cnt < 0)) { applog(LOG_ERR, "Ztex check device: Failed to set fpga with err %d", cnt); ztex->root->selectedFpga = -1; return cnt; } ztex->root->selectedFpga = number; } return 0; } int libztex_setFreq(struct libztex_device *ztex, uint16_t freq, const char *repr) { int cnt; uint16_t oldfreq = ztex->dclk.freqM; if (freq > ztex->dclk.freqMaxM) freq = ztex->dclk.freqMaxM; cnt = libusb_control_transfer(ztex->hndl, 0x40, 0x83, freq, 0, NULL, 0, 500); if (unlikely(cnt < 0)) { applog(LOG_ERR, "Ztex check device: Failed to set frequency with err %d", cnt); return cnt; } ztex->dclk.freqM = freq; if (oldfreq > ztex->dclk.freqMaxM) applog(LOG_WARNING, "%"PRIpreprv": Frequency set to %u MHz (range: %u-%u)", repr, (unsigned)(ztex->freqM1 * (ztex->dclk.freqM + 1)), (unsigned)ztex->freqM1, (unsigned)(ztex->freqM1 * (ztex->dclk.freqMaxM + 1)) ); else dclk_msg_freqchange(repr, ztex->freqM1 * (oldfreq + 1), ztex->freqM1 * (ztex->dclk.freqM + 1), NULL); return 0; } int libztex_resetFpga(struct libztex_device *ztex) { return libusb_control_transfer(ztex->hndl, 0x40, 0x31, 0, 0, NULL, 0, 1000); } int libztex_suspend(struct libztex_device *ztex) { if (ztex->suspendSupported) { return libusb_control_transfer(ztex->hndl, 0x40, 0x84, 0, 0, NULL, 0, 1000); } else { return 0; } } struct libztex_device *libztex_prepare_device2(struct libusb_device * const dev) { struct libztex_device *newdev; int i, cnt, err; unsigned char buf[64]; newdev = malloc(sizeof(*newdev)); if (!newdev) applogr(NULL, LOG_ERR, "%s: Failed to malloc libztex_device", __func__); newdev->bitFileName = NULL; newdev->numberOfFpgas = -1; dclk_prepare(&newdev->dclk); newdev->dclk.freqMinM = 0; err = libusb_open(dev, &newdev->hndl); if (err != LIBUSB_SUCCESS) { applog(LOG_ERR, "%s: Can not open ZTEX device: %s", __func__, bfg_strerror(err, BST_LIBUSB)); return NULL; } err = libusb_get_device_descriptor(dev, &newdev->descriptor); if (unlikely(err != 0)) { applogr(NULL, LOG_ERR, "%s: Failed to open read descriptor: %s", __func__, bfg_strerror(err, BST_LIBUSB)); } cnt = libztex_get_string_descriptor_ascii(newdev->hndl, newdev->descriptor.iSerialNumber, newdev->snString, sizeof(newdev->snString)); if (unlikely(cnt < 0)) { applogr(NULL, LOG_ERR, "%s: Failed to read device snString: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); } cnt = libztex_get_string_descriptor_ascii(newdev->hndl, newdev->descriptor.iProduct, buf, sizeof(buf)); if (unlikely(cnt < 0)) applog(LOG_WARNING, "%s: Failed to read device product: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); else newdev->dev_product = buf[0] ? strdup((void*)buf) : NULL; cnt = libztex_get_string_descriptor_ascii(newdev->hndl, newdev->descriptor.iManufacturer, buf, sizeof(buf)); if (unlikely(cnt < 0)) applog(LOG_WARNING, "%s: Failed to read device manufacturer: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); else newdev->dev_manufacturer = buf[0] ? 
strdup((void*)buf) : NULL; cnt = libusb_control_transfer(newdev->hndl, 0xc0, 0x22, 0, 0, buf, 40, 500); if (unlikely(cnt < 0)) { applogr(NULL, LOG_ERR, "%s: Failed to read ztex descriptor: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); } if (buf[0] != 40 || buf[1] != 1 || buf[2] != 'Z' || buf[3] != 'T' || buf[4] != 'E' || buf[5] != 'X') { applogr(NULL, LOG_ERR, "%s: Unexpected data reading ztex descriptor", __func__); } newdev->productId[0] = buf[6]; newdev->productId[1] = buf[7]; newdev->productId[2] = buf[8]; newdev->productId[3] = buf[9]; newdev->fwVersion = buf[10]; newdev->interfaceVersion = buf[11]; newdev->interfaceCapabilities[0] = buf[12]; newdev->interfaceCapabilities[1] = buf[13]; newdev->interfaceCapabilities[2] = buf[14]; newdev->interfaceCapabilities[3] = buf[15]; newdev->interfaceCapabilities[4] = buf[16]; newdev->interfaceCapabilities[5] = buf[17]; newdev->moduleReserved[0] = buf[18]; newdev->moduleReserved[1] = buf[19]; newdev->moduleReserved[2] = buf[20]; newdev->moduleReserved[3] = buf[21]; newdev->moduleReserved[4] = buf[22]; newdev->moduleReserved[5] = buf[23]; newdev->moduleReserved[6] = buf[24]; newdev->moduleReserved[7] = buf[25]; newdev->moduleReserved[8] = buf[26]; newdev->moduleReserved[9] = buf[27]; newdev->moduleReserved[10] = buf[28]; newdev->moduleReserved[11] = buf[29]; cnt = libusb_control_transfer(newdev->hndl, 0xc0, 0x82, 0, 0, buf, 64, 500); if (unlikely(cnt < 0)) { applogr(NULL, LOG_ERR, "%s: Failed to read ztex descriptor: %s", __func__, bfg_strerror(cnt, BST_LIBUSB)); } if (unlikely(buf[0] != 5)) { if (unlikely(buf[0] != 2 && buf[0] != 4)) { applogr(NULL, LOG_ERR, "%s: Invalid BTCMiner descriptor version (%d). Firmware must be updated.", __func__, buf[0]); return NULL; } applog(LOG_WARNING, "%s: Firmware out of date (%d).", __func__, buf[0]); } i = buf[0] > 4? 11: (buf[0] > 2? 10: 8); while (cnt < 64 && buf[cnt] != 0) cnt++; if (cnt < i + 1) { applogr(NULL, LOG_ERR, "%s: Invalid bitstream file name.", __func__); } newdev->bitFileName = malloc(sizeof(char) * (cnt + 1)); memcpy(newdev->bitFileName, &buf[i], cnt); newdev->bitFileName[cnt] = 0; newdev->numNonces = buf[1] + 1; newdev->offsNonces = ((buf[2] & 255) | ((buf[3] & 255) << 8)) - 10000; newdev->freqM1 = ((buf[4] & 255) | ((buf[5] & 255) << 8) ) * 0.01; newdev->dclk.freqMaxM = (buf[7] & 255); newdev->dclk.freqM = (buf[6] & 255); newdev->dclk.freqMDefault = newdev->dclk.freqM; newdev->suspendSupported = (buf[0] == 5); newdev->hashesPerClock = buf[0] > 2? (((buf[8] & 255) | ((buf[9] & 255) << 8)) + 1) / 128.0: 1.0; newdev->extraSolutions = buf[0] > 4? buf[10]: 0; applog(LOG_DEBUG, "%s: PID: %d numNonces: %d offsNonces: %d freqM1: %f freqMaxM: %d freqM: %d suspendSupported: %s hashesPerClock: %f extraSolutions: %d", __func__, buf[0], newdev->numNonces, newdev->offsNonces, newdev->freqM1, newdev->dclk.freqMaxM, newdev->dclk.freqM, newdev->suspendSupported ? 
"T": "F", newdev->hashesPerClock, newdev->extraSolutions); if (buf[0] < 4) { if (strncmp(newdev->bitFileName, "ztex_ufm1_15b", 13) != 0) newdev->hashesPerClock = 0.5; applog(LOG_WARNING, "%s: HASHES_PER_CLOCK not defined, assuming %0.2f", __func__, newdev->hashesPerClock); } newdev->usbbus = libusb_get_bus_number(dev); newdev->usbaddress = libusb_get_device_address(dev); sprintf(newdev->repr, "ZTEX %s-1", newdev->snString); return newdev; } void libztex_destroy_device(struct libztex_device* ztex) { if (ztex->hndl != NULL) { libusb_release_interface(ztex->hndl, 0); libusb_close(ztex->hndl); ztex->hndl = NULL; } if (ztex->bitFileName != NULL) { free(ztex->bitFileName); ztex->bitFileName = NULL; } free(ztex); } int libztex_sendHashData(struct libztex_device *ztex, unsigned char *sendbuf) { int cnt = 0, ret, len; if (ztex == NULL || ztex->hndl == NULL) return 0; ret = 44; len = 0; while (ret > 0) { cnt = libusb_control_transfer(ztex->hndl, 0x40, 0x80, 0, 0, sendbuf + len, ret, 1000); if (cnt >= 0) { ret -= cnt; len += cnt; } else break; } if (unlikely(cnt < 0)) applog(LOG_ERR, "%s: Failed sendHashData with err %d", ztex->repr, cnt); return cnt; } int libztex_readHashData(struct libztex_device *ztex, struct libztex_hash_data nonces[]) { int bufsize = 12 + ztex->extraSolutions * 4; int cnt = 0, i, j, ret, len; unsigned char *rbuf; if (ztex->hndl == NULL) return 0; rbuf = malloc(sizeof(unsigned char) * (ztex->numNonces * bufsize)); if (rbuf == NULL) { applog(LOG_ERR, "%s: Failed to allocate memory for reading nonces", ztex->repr); return 0; } ret = bufsize * ztex->numNonces; len = 0; while (ret > 0) { cnt = libusb_control_transfer(ztex->hndl, 0xc0, 0x81, 0, 0, rbuf + len, ret, 1000); if (cnt >= 0) { ret -= cnt; len += cnt; } else break; } if (unlikely(cnt < 0)) { applog(LOG_ERR, "%s: Failed readHashData with err %d", ztex->repr, cnt); free(rbuf); return cnt; } for (i=0; inumNonces; i++) { uint32_t *nonce_data = (void*)&rbuf[i * bufsize]; nonces[i].goldenNonce[0] = nonce_data[0] - ztex->offsNonces; //applog(LOG_DEBUG, "W %d:0 %0.8x", i, nonces[i].goldenNonce[0]); nonces[i].nonce = le32toh(nonce_data[1]) - ztex->offsNonces; nonces[i].hash7 = le32toh(nonce_data[2]); for (j = 1; j <= ztex->extraSolutions; ++j) { nonces[i].goldenNonce[j] = le32toh(nonce_data[2 + j]) - ztex->offsNonces; //applog(LOG_DEBUG, "W %d:%d %0.8x", i, j, nonces[i].goldenNonce[j]); } } free(rbuf); return cnt; } void libztex_freeDevList(struct libztex_dev_list **devs) { bool done = false; ssize_t cnt = 0; while (!done) { if (devs[cnt]->next == NULL) done = true; free(devs[cnt++]); } free(devs); } bfgminer-bfgminer-3.10.0/libztex.h000066400000000000000000000045471226556647300170370ustar00rootroot00000000000000#ifndef __LIBZTEX_H__ #define __LIBZTEX_H__ #include #include #include #include "dynclock.h" #define LIBZTEX_MAX_DESCRIPTORS 512 #define LIBZTEX_SNSTRING_LEN 10 #define LIBZTEX_IDVENDOR 0x221A #define LIBZTEX_IDPRODUCT 0x0100 struct libztex_fpgastate { bool fpgaConfigured; unsigned char fpgaChecksum; uint16_t fpgaBytes; unsigned char fpgaInitB; unsigned char fpgaFlashResult; bool fpgaFlashBitSwap; }; struct libztex_device { pthread_mutex_t mutex; struct libztex_device *root; struct libusb_device_descriptor descriptor; libusb_device_handle *hndl; unsigned char usbbus; unsigned char usbaddress; char *dev_manufacturer; char *dev_product; unsigned char snString[LIBZTEX_SNSTRING_LEN+1]; unsigned char productId[4]; unsigned char fwVersion; unsigned char interfaceVersion; unsigned char interfaceCapabilities[6]; unsigned char 
moduleReserved[12]; uint8_t numNonces; uint16_t offsNonces; double freqM1; char* bitFileName; bool suspendSupported; double hashesPerClock; uint8_t extraSolutions; struct dclk_data dclk; int16_t numberOfFpgas; int handles; int selectedFpga; bool parallelConfigSupport; char repr[20]; }; struct libztex_dev_list { struct libztex_device *dev; struct libztex_dev_list *next; }; struct libztex_hash_data { uint32_t goldenNonce[2]; uint32_t nonce; uint32_t hash7; }; enum ztex_check_result { CHECK_ERROR, CHECK_IS_NOT_ZTEX, CHECK_OK, CHECK_RESCAN, }; extern int libztex_scanDevices (struct libztex_dev_list ***devs); extern void libztex_freeDevList (struct libztex_dev_list **devs); extern enum ztex_check_result libztex_checkDevice(struct libusb_device *); extern struct libztex_device *libztex_prepare_device2(struct libusb_device *); extern void libztex_destroy_device (struct libztex_device* ztex); extern int libztex_configureFpga (struct libztex_device *dev, const char *repr); extern int libztex_setFreq (struct libztex_device *ztex, uint16_t freq, const char *repr); extern int libztex_sendHashData (struct libztex_device *ztex, unsigned char *sendbuf); extern int libztex_readHashData (struct libztex_device *ztex, struct libztex_hash_data nonces[]); extern int libztex_resetFpga (struct libztex_device *ztex); extern int libztex_selectFpga(struct libztex_device *ztex, int16_t fpgaNum); extern int libztex_numberOfFpgas(struct libztex_device *ztex); #endif /* __LIBZTEX_H__ */ bfgminer-bfgminer-3.10.0/linux-usb-bfgminer000066400000000000000000000246701226556647300206440ustar00rootroot00000000000000How to setup a BFGMiner using Xubuntu 11.04 live on a USB The master version of this document is here: https://github.com/luke-jr/bfgminer/blob/master/linux-usb-bfgminer The original old version on bitcointalk is: https://bitcointalk.org/index.php?topic=28402.msg426741#msg426741 ======== I have said to select English for the install process for 2 reasons: 1) I don't know any other spoken language very well and 2) I'm not sure what problems installing under a different language might cause (it will probably cause no problems but I don't know) Software ======== Short hardware comment: Your mining computer doesn't need any HDD or CD/DVD/BD as long as it has at least 2GB of RAM, can boot USB, has some network connection to the internet and of course a reasonable mining ATI graphics card ... Or you can boot a windows PC with the USB to only do mining ... and ignore the system HDD ... 
wasting energy running the HDD (roughly 10 Watts per HDD) :) If you wish to install to an HDD instead of a USB, see the changes to the instructions at the end To create the USB, you need of course a 4GB USB and temporarily need a PC with a CD (or DVD/BD) writer, a USB port and of course an internet connection to the PC 1) Download the xubuntu 11.04 desktop live CD iso for amd64 ( look here for mirrors: http://www.xubuntu.org/getubuntu ) 2) Burn it to CD then boot that temporarily on any PC with a CD/DVD/BD and a USB port (this and the next 2 step won't effect that PC) Select "English" then select "Try Xubuntu without installing" and wait for the desktop to appear (this happens by default if you wait for the timeouts) 3) Plug in your 4GB USB device and it should appear on the desktop - you can leave it's contents as long as there is at least 2.8GB free 4) Now run "Startup Disk Creator" in "Applications->System" (the system menu is the little rat in the top left corner) (if you have no mouse you can get the menu with and navigate the menu with the arrow keys and key) From here select the boot CD as the "Source" and the USB as the "Disk to use" lastly move the slider to 2GB for reserved extra space The 2GB should be enough for modifications Click: "Make Install Disk" After about 10-15 minutes you have a base xubuntu 11.04 boot USB (you can shut down this computer now) 5) Boot your BFGMiner PC with this USB stick, select "English" then select "Try Xubuntu without installing" and wait for the desktop to appear (this happens by default if you wait for the timeouts) 6) Start a terminal "Applications->Accessories->Terminal Emulator" 7) sudo apt-get install openssh-server screen if you have a problem here then it's probably coz the internet isn't available ... sort that out by reading elsewhere about routers etc 8) sudo apt-get install fglrx fglrx-amdcccle fglrx-dev sudo sync sudo shutdown -r now N.B. always do a "sudo sync" and wait for it to finish every time before shutting down the PC to ensure all data is written to the USB 9) sudo aticonfig --lsa this lists your ATI cards so you can see them sudo aticonfig --adapter=all --odgt this checks it can access all the cards ... 10) sudo aticonfig --adapter=all --initial this gets an error - no idea why but the xorg.conf is OK sudo sync sudo shutdown -r now 11) sudo aticonfig --adapter=all --odgt this checks it can access all the cards ... 
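(Optional quick check at this point: before moving on to the SDK you can confirm the fglrx driver actually loaded with

lsmod | grep fglrx
fglrxinfo

lsmod should list the fglrx module and fglrxinfo should report your ATI card(s) as the renderer - exact output varies with the driver version, so treat it as a rough check only. If either shows nothing, redo step 8 before continuing.)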
12) get AMD-APP-SDK-v2.8-lnx64.tgz from http://developer.amd.com/tools/heterogeneous-computing/amd-accelerated-parallel-processing-app-sdk/downloads/ ( http://developer.amd.com/wordpress/media/2012/11/AMD-APP-SDK-v2.8-lnx64.tgz ) sudo su cd /opt (replace /home/ubuntu/ with wherever you put the file: ) tar -xvzf /home/ubuntu/AMD-APP-SDK-v2.8-lnx64.tgz cd AMD-APP-SDK-v2.8-lnx64/ cp -pv lib/x86_64/* /usr/lib/ rsync -avl include/CL/ /usr/include/CL/ tar -xvzf icd-registration.tgz rsync -avl etc/OpenCL/ /etc/OpenCL/ ldconfig sync shutdown -r now You now have an OpenCL enabled xubuntu 13) BFGMiner: sudo apt-get install curl get the binary Ubuntu BFGMiner from PPA https://launchpad.net/~unit3/+archive/bfgminer ./bfgminer -n this shows you the GPU's it found on your PC See further below if you get an error regarding libtinfo.so.5 14) An OC option: This is no longer needed since BFGMiner 2.* includes OC, however: sudo apt-get install libwxbase2.8-0 libwxgtk2.8-0 http://sourceforge.net/projects/amdovdrvctrl/ for an Over/underclocking application and get the file listed below then: sudo dpkg -i amdoverdrivectrl_1.2.1_amd64.deb 15) set the screen saver to ONLY blank ... Move the mouse to the bottom of the screen and you see a set of icons like on an Apple PC Click on Settings, then in the Settings window "Screensaver" Set "Mode:" to "Blank Screen Only" 16) apt-get install ntpd An accurate clock is always a good idea :) 17) if you wish to ssh into the box you must set a password to do this you simply have to be logged into it at the screen and type sudo passwd ubuntu it will prompt you (twice) to enter a password for the ubuntu account Initial setup complete. ======== If you want to SSH into the machine and run BFGMiner: From a terminal on the miner display each time after you boot: xhost + 'xhost +' isn't needed if you ssh into the machine with the same username that the GUI boots into (which is 'ubuntu' in this case) Then after you ssh into the machine: export DISPLAY=:0 before running BFGMiner Also note, that you should force the screen to blank when mining if the ATI card is displaying the screen (using the screen saver application menu) In my case it takes away 50Mh/s when the screen isn't blanked It will auto blank - but make sure the blank is of course just blank as mentioned above at 15) This is of course just the basics ... but it should get you a computer up and running and able to run BFGMiner ======== You should keep an eye on USB disk space The system logger writes log files in the /var/log/ directory The two main ones that grow large are 'kern.log' and 'syslog' If you want to keep them, save them away to some other computer When space is low, just delete them e.g. 
sudo rm -i /var/log/syslog sudo rm -i /var/log/kern.log The 'df' command will show you the current space e.g.: sudo df Filesystem 1K-blocks Used Available Use% Mounted on aufs 2099420 892024 1100748 45% / none 1015720 628 1015092 1% /dev /dev/sda1 3909348 2837248 1072100 73% /cdrom /dev/loop0 670848 670848 0 100% /rofs none 1023772 136 1023636 1% /dev/shm tmpfs 1023772 16 1023756 1% /tmp none 1023772 124 1023648 1% /var/run none 1023772 0 1023772 0% /var/lock This shows the 2GB space allocated when you setup the USB as '/' (aufs) In this example, it's currently 45% full with almost 1.1GB of free space ======== The latest version (2.0.8) of BFGMiner is built with 11.10 (not 11.04) If you get the following error when running the prebuilt version in 11.04: ./bfgminer: error while loading shared libraries: libtinfo.so.5: cannot open shared object file: No such file or directory The fix is to simply link the old curses library to the new name e.g.: cd /lib64/ sudo ln -s libncurses.so.5 libtinfo.so.5 ======== If you wish to install to an HDD instead of a USB: -------------------------------------------------- As per before: 1) Download the xubuntu 11.04 desktop live CD iso for amd64 ( look here for mirrors: http://www.xubuntu.org/getubuntu ) Then: 2) Burn it to CD then boot that on your new mining PC Select "English" then select "Install Xubuntu" (you have 30 seconds to do this) 3) When the Install window comes up - again select "English" and click "Forward" 4) The next page will show you if you meet certain install requirements (make sure you do meet them all) Don't select the download option The 3rd party option isn't needed for mining so ignore that also Click "Forward" 5) With "Allocate drive space" it's probably easiest to say to use the "Erase" option. This is just for mining right? :) However, if you have anything on the HDD that you want to keep - the "Erase" install process will delete it - so back it up (quit the install) Also make sure there are no OTHER HDD attached that it may erase also i.e. only have attached the one HDD that you want to install onto unless you know exactly what you are doing If you see the "Install Xubuntu 11.04 alongside 'something'" then that just means that the HDD wasn't blank. If you want to try this option - do that yourself and then skip to step 7) below when you get to that. There are plenty of other options available if you select "Something else" but I'm not going to go into all the details here other than to say that my preferred partioning is: /boot = 1GB = ext2, swap = twice memory size, / = 100GB = ext3 and the rest: /extra = ext3 Click "Forward" 6) If you selected "Erase" then it allows you to choose the drive to install to Then click "Install Now" 7) "Where are you?" sort that out then click "Forward" 8) "Keyboard layout" sort that out (use the default) then click "Forward" 9) "Who are you?" 
The important one here is "Pick a username:" coz that's the name you will need to ssh into, to access it remotely (and of course the "Choose a Password" you set) If you set the "username" to anything but "ubuntu" then: wherever in this document I have mentioned the username "ubuntu" you must of course use the username you chose here instead of "ubuntu" Important: set it to "log in automatically" if you ever want to be able to start BFGMiner without being in front of the computer since 'X' must be running to use BFGMiner properly That does of course mean that the computer isn't secure from anyone who has access to it - but then again no computer that can automatically reboot is secure from anyone who has access to the actual computer itself Then click "Forward" 10) Of course when it completes click on "Restart Now" ... and remove the Xubuntu CD when it asks you 11) Wait for it to finish rebooting ... and it will auto login (unless you didn't do step 9) "Important:") 12) After it logs in, an upgrade popup for 11.10 (or later) will appear Select "Don't Upgrade" 13) Now go to step 6) of the USB script above for what to do next and that covers everything else needed bfgminer-bfgminer-3.10.0/logging.c000066400000000000000000000043741226556647300167750ustar00rootroot00000000000000/* * Copyright 2011-2012 Con Kolivas * Copyright 2012-2013 Luke Dashjr * Copyright 2013 Andrew Smith * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include "compat.h" #include "logging.h" #include "miner.h" bool opt_debug = false; bool opt_debug_console = false; // Only used if opt_debug is also enabled bool opt_log_output = false; bool opt_log_microseconds; /* per default priorities higher than LOG_NOTICE are logged */ int opt_log_level = LOG_NOTICE; static void _my_log_curses(int prio, const char *datetime, const char *str) { #ifdef HAVE_CURSES extern bool use_curses; if (use_curses && _log_curses_only(prio, datetime, str)) ; else #endif printf(" %s %s%s", datetime, str, " \n"); } /* high-level logging function, based on global opt_log_level */ /* * log function */ void _applog(int prio, const char *str) { #ifdef HAVE_SYSLOG_H if (use_syslog) { syslog(prio, "%s", str); } #else if (0) {} #endif else { bool writetocon = (opt_debug_console || (opt_log_output && prio != LOG_DEBUG) || prio <= LOG_NOTICE) && !(opt_quiet && prio != LOG_ERR); bool writetofile = !isatty(fileno((FILE *)stderr)); if (!(writetocon || writetofile)) return; char datetime[64]; if (opt_log_microseconds) { struct timeval tv; struct tm tm; bfg_gettimeofday(&tv); localtime_r(&tv.tv_sec, &tm); snprintf(datetime, sizeof(datetime), "[%d-%02d-%02d %02d:%02d:%02d.%06ld]", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, (long)tv.tv_usec); } else get_now_datestamp(datetime, sizeof(datetime)); if (writetofile || writetocon) { bfg_console_lock(); /* Only output to stderr if it's not going to the screen as well */ if (writetofile) { fprintf(stderr, " %s %s\n", datetime, str); /* atomic write to stderr */ fflush(stderr); } if (writetocon) _my_log_curses(prio, datetime, str); bfg_console_unlock(); } } } bfgminer-bfgminer-3.10.0/logging.h000066400000000000000000000067611226556647300170040ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2012 
zefir * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef __LOGGING_H__ #define __LOGGING_H__ #include "config.h" #include #include #include #include #ifdef HAVE_SYSLOG_H #include #else enum { LOG_ERR, LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG, }; #endif #include "util.h" /* debug flags */ extern bool opt_debug; extern bool opt_debug_console; extern bool opt_log_output; extern bool opt_log_microseconds; extern bool opt_realquiet; extern bool want_per_device_stats; /* global log_level, messages with lower or equal prio are logged */ extern int opt_log_level; #define LOGBUFSIZ 0x1000 extern void _applog(int prio, const char *str); #define IN_FMT_FFL " in %s %s():%d" #define applog(prio, fmt, ...) do { \ if (opt_debug || prio != LOG_DEBUG) { \ char tmp42[LOGBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ _applog(prio, tmp42); \ } \ } while (0) #define applogsiz(prio, _SIZ, fmt, ...) do { \ if (opt_debug || prio != LOG_DEBUG) { \ char tmp42[_SIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ _applog(prio, tmp42); \ } \ } while (0) #define applogr(rv, prio, ...) do { \ applog(prio, __VA_ARGS__); \ return rv; \ } while (0) #define appperror(prio, s) do { \ const char *_tmp43 = bfg_strerror(errno, BST_ERRNO); \ if (s && s[0]) \ applog(prio, "%s: %s", s, _tmp43); \ else \ _applog(prio, _tmp43); \ } while (0) #define perror(s) appperror(LOG_ERR, s) #define applogfailinfo(prio, failed, fmt, ...) do { \ applog(prio, "Failed to %s"IN_FMT_FFL": "fmt, \ failed, \ __FILE__, __func__, __LINE__, \ __VA_ARGS__); \ } while (0) #define applogfailinfor(rv, prio, failed, fmt, ...) do { \ applogfailinfo(prio, failed, fmt, __VA_ARGS__); \ return rv; \ } while (0) #define applogfail(prio, failed) do { \ applog(prio, "Failed to %s"IN_FMT_FFL, \ failed, \ __FILE__, __func__, __LINE__); \ } while (0) #define applogfailr(rv, prio, failed) do { \ applogfail(prio, failed); \ return rv; \ } while (0) extern void _bfg_clean_up(bool); #define quit(status, fmt, ...) do { \ _bfg_clean_up(false); \ if (fmt) { \ fprintf(stderr, fmt, ##__VA_ARGS__); \ } \ fprintf(stderr, "\n"); \ fflush(stderr); \ _quit(status); \ } while (0) #define quithere(status, fmt, ...) do { \ if (fmt) { \ char tmp42[LOGBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt IN_FMT_FFL, \ ##__VA_ARGS__, __FILE__, __func__, __LINE__); \ _applog(LOG_ERR, tmp42); \ } \ _quit(status); \ } while (0) #define quitfrom(status, _file, _func, _line, fmt, ...) do { \ if (fmt) { \ char tmp42[LOGBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt IN_FMT_FFL, \ ##__VA_ARGS__, _file, _func, _line); \ _applog(LOG_ERR, tmp42); \ } \ _quit(status); \ } while (0) #ifdef HAVE_CURSES #define wlog(fmt, ...) do { \ char tmp42[LOGBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ _wlog(tmp42); \ } while (0) #define wlogprint(fmt, ...) 
do { \ char tmp42[LOGBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ _wlogprint(tmp42); \ } while (0) #endif extern void hexdump(const void *, unsigned int len); #endif /* __LOGGING_H__ */ bfgminer-bfgminer-3.10.0/lowl-hid.c000066400000000000000000000117631226556647300170660ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #ifndef WIN32 #include typedef void *dlh_t; #else #include #define dlopen(lib, flags) LoadLibrary(lib) #define dlsym(h, sym) ((void*)GetProcAddress(h, sym)) #define dlerror() "unknown" #define dlclose(h) FreeLibrary(h) typedef HMODULE dlh_t; #endif #include #include #include #include #include #include #include #include "logging.h" #include "lowlevel.h" #include "miner.h" struct hid_device_info HID_API_EXPORT *(*dlsym_hid_enumerate)(unsigned short, unsigned short); void HID_API_EXPORT (*dlsym_hid_free_enumeration)(struct hid_device_info *); hid_device * HID_API_EXPORT (*dlsym_hid_open_path)(const char *); void HID_API_EXPORT (*dlsym_hid_close)(hid_device *); int HID_API_EXPORT (*dlsym_hid_read)(hid_device *, unsigned char *, size_t); int HID_API_EXPORT (*dlsym_hid_write)(hid_device *, const unsigned char *, size_t); #define LOAD_SYM(sym) do { \ if (!(dlsym_ ## sym = dlsym(dlh, #sym))) { \ applog(LOG_DEBUG, "%s: Failed to load %s in %s", __func__, #sym, dlname); \ goto fail; \ } \ } while(0) static bool hidapi_libusb; static struct hid_device_info *_probe_hid_enum; static bool hidapi_try_lib(const char * const dlname) { struct hid_device_info *hid_enum; dlh_t dlh; dlh = dlopen(dlname, RTLD_NOW); if (!dlh) { applog(LOG_DEBUG, "%s: Couldn't load %s: %s", __func__, dlname, dlerror()); return false; } LOAD_SYM(hid_enumerate); LOAD_SYM(hid_free_enumeration); hid_enum = dlsym_hid_enumerate(0, 0); if (!hid_enum) { applog(LOG_DEBUG, "%s: Loaded %s, but no devices enumerated; trying other libraries", __func__, dlname); goto fail; } _probe_hid_enum = hid_enum; LOAD_SYM(hid_open_path); LOAD_SYM(hid_close); LOAD_SYM(hid_read); LOAD_SYM(hid_write); if (strstr(dlname, "libusb")) hidapi_libusb = true; applog(LOG_DEBUG, "%s: Successfully loaded %s", __func__, dlname); return true; fail: dlclose(dlh); return false; } // #defines hid_* calls, so must be after library loader #include "lowl-hid.h" static bool hidapi_load_library() { if (dlsym_hid_write) return true; const char **p; char dlname[23] = "libhidapi"; const char *dltry[] = { "", "-0", "-hidraw", "-libusb", NULL }; for (p = &dltry[0]; *p; ++p) { sprintf(&dlname[9], "%s.%s", *p, #ifdef WIN32 "dll" #elif defined(__APPLE__) //Mach-O uses dylibs for shared libraries //http://www.finkproject.org/doc/porting/porting.en.html#shared "dylib" #else "so" #endif ); if (hidapi_try_lib(dlname)) return true; } return false; } static char *wcs2str_dup(wchar_t *ws) { if (!(ws && ws[0])) return NULL; char *rv; int clen, i; clen = wcslen(ws); ++clen; rv = malloc(clen); for (i = 0; i < clen; ++i) rv[i] = ws[i]; return rv; } static struct lowlevel_device_info *hid_devinfo_scan() { if (!hidapi_load_library()) { applog(LOG_DEBUG, "%s: Failed to load any hidapi library", __func__); return NULL; } struct hid_device_info *hid_enum, *hid_item; struct lowlevel_device_info *info, *devinfo_list = NULL; if (_probe_hid_enum) { 
hid_enum = _probe_hid_enum; _probe_hid_enum = NULL; } else hid_enum = hid_enumerate(0, 0); if (!hid_enum) { applog(LOG_DEBUG, "%s: No HID devices found", __func__); return NULL; } LL_FOREACH(hid_enum, hid_item) { info = malloc(sizeof(struct lowlevel_device_info)); char *devid; const char * const hidpath = hid_item->path; if (hidapi_libusb && strlen(hidpath) == 12 && hidpath[0] == '0' && hidpath[1] == '0' && isxdigit(hidpath[2]) && isxdigit(hidpath[3]) && hidpath[4] == ':' && hidpath[5] == '0' && hidpath[6] == '0' && isxdigit(hidpath[7]) && isxdigit(hidpath[8]) && hidpath[9] == ':') { unsigned char usbbus, usbaddr; hex2bin(&usbbus , &hidpath[2], 1); hex2bin(&usbaddr, &hidpath[7], 1); devid = bfg_make_devid_usb(usbbus, usbaddr); } else { devid = malloc(4 + strlen(hid_item->path) + 1); sprintf(devid, "hid:%s", hid_item->path); } *info = (struct lowlevel_device_info){ .lowl = &lowl_hid, .path = strdup(hid_item->path), .devid = devid, .vid = hid_item->vendor_id, .pid = hid_item->product_id, .manufacturer = wcs2str_dup(hid_item->manufacturer_string), .product = wcs2str_dup(hid_item->product_string), .serial = wcs2str_dup(hid_item->serial_number), }; LL_PREPEND(devinfo_list, info); applog(LOG_DEBUG, "%s: Found \"%s\" serial \"%s\"", __func__, info->product, info->serial); } hid_free_enumeration(hid_enum); return devinfo_list; } struct lowlevel_driver lowl_hid = { .dname = "hid", .devinfo_scan = hid_devinfo_scan, }; bfgminer-bfgminer-3.10.0/lowl-hid.h000066400000000000000000000016201226556647300170620ustar00rootroot00000000000000#ifndef BFG_LOWL_HID_H #define BFG_LOWL_HID_H #include #ifdef WIN32 #define HID_API_EXPORT __declspec(dllexport) #else #define HID_API_EXPORT /* */ #endif extern struct hid_device_info HID_API_EXPORT *(*dlsym_hid_enumerate)(unsigned short, unsigned short); extern void HID_API_EXPORT (*dlsym_hid_free_enumeration)(struct hid_device_info *); extern hid_device * HID_API_EXPORT (*dlsym_hid_open_path)(const char *); extern void HID_API_EXPORT (*dlsym_hid_close)(hid_device *); extern int HID_API_EXPORT (*dlsym_hid_read)(hid_device *, unsigned char *, size_t); extern int HID_API_EXPORT (*dlsym_hid_write)(hid_device *, const unsigned char *, size_t); #define hid_enumerate dlsym_hid_enumerate #define hid_free_enumeration dlsym_hid_free_enumeration #define hid_open_path dlsym_hid_open_path #define hid_close dlsym_hid_close #define hid_read dlsym_hid_read #define hid_write dlsym_hid_write #endif bfgminer-bfgminer-3.10.0/lowl-usb.c000066400000000000000000000202201226556647300170770ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include "logging.h" #include "lowlevel.h" #include "lowl-usb.h" #include "miner.h" #include "util.h" static char *lowl_libusb_dup_string(libusb_device_handle * const handle, const uint8_t idx, const char * const idxname, const char * const fname, const char * const devid) { if (!idx) return NULL; unsigned char buf[0x100]; const int n = libusb_get_string_descriptor_ascii(handle, idx, buf, sizeof(buf)-1); if (unlikely(n < 0)) { // This could be LOG_ERR, but it's annoyingly common :/ applog(LOG_DEBUG, "%s: Error getting USB string %d (%s) from %s: %s", fname, idx, idxname, devid, bfg_strerror(n, BST_LIBUSB)); return NULL; } if (n == 0) return NULL; buf[n] = '\0'; return strdup((void*)buf); } static void usb_devinfo_free(struct lowlevel_device_info * const info) { libusb_device * const dev = info->lowl_data; if (dev) libusb_unref_device(dev); } static struct lowlevel_device_info *usb_devinfo_scan() { struct lowlevel_device_info *devinfo_list = NULL; ssize_t count, i; libusb_device **list; struct libusb_device_descriptor desc; libusb_device_handle *handle; struct lowlevel_device_info *info; int err; if (unlikely(!have_libusb)) return NULL; count = libusb_get_device_list(NULL, &list); if (unlikely(count < 0)) { applog(LOG_ERR, "%s: Error getting USB device list: %s", __func__, bfg_strerror(count, BST_LIBUSB)); return NULL; } for (i = 0; i < count; ++i) { err = libusb_get_device_descriptor(list[i], &desc); if (unlikely(err)) { applog(LOG_ERR, "%s: Error getting device descriptor: %s", __func__, bfg_strerror(err, BST_LIBUSB)); continue; } info = malloc(sizeof(struct lowlevel_device_info)); *info = (struct lowlevel_device_info){ .lowl = &lowl_usb, .devid = bfg_make_devid_libusb(list[i]), .lowl_data = libusb_ref_device(list[i]), .vid = desc.idVendor, .pid = desc.idProduct, }; err = libusb_open(list[i], &handle); if (unlikely(err)) applog(LOG_DEBUG, "%s: Error opening device %s: %s", __func__, info->devid, bfg_strerror(err, BST_LIBUSB)); else { info->manufacturer = lowl_libusb_dup_string(handle, desc.iManufacturer, "iManufacturer", __func__, info->devid); info->product = lowl_libusb_dup_string(handle, desc.iProduct, "iProduct", __func__, info->devid); info->serial = lowl_libusb_dup_string(handle, desc.iSerialNumber, "iSerialNumber", __func__, info->devid); libusb_close(handle); } LL_PREPEND(devinfo_list, info); } libusb_free_device_list(list, 1); return devinfo_list; } bool lowl_usb_attach_kernel_driver(const struct lowlevel_device_info * const info) { libusb_device * const dev = info->lowl_data; libusb_device_handle *devh; bool rv = false; if (libusb_open(dev, &devh)) return false; if (libusb_kernel_driver_active(devh, 0) == 0) if (!libusb_attach_kernel_driver(devh, 0)) { applog(LOG_DEBUG, "Reattaching kernel driver for %s", info->devid); rv = true; } libusb_close(devh); return rv; } struct libusb_device_handle *lowl_usb_open(struct lowlevel_device_info * const info) { libusb_device * const dev = info->lowl_data; if (!dev) return NULL; libusb_device_handle *devh; if (libusb_open(dev, &devh)) { applog(LOG_ERR, "%s: Error opening device", __func__); return NULL; } return devh; } struct device_drv *bfg_claim_usb(struct device_drv * const api, const bool verbose, const uint8_t usbbus, const uint8_t usbaddr) { char * const devpath = bfg_make_devid_usb(usbbus, usbaddr); struct device_drv * const rv = bfg_claim_any(api, verbose ? 
"" : NULL, devpath); free(devpath); return rv; } #ifdef HAVE_LIBUSB void cgpu_copy_libusb_strings(struct cgpu_info *cgpu, libusb_device *usb) { unsigned char buf[0x20]; libusb_device_handle *h; struct libusb_device_descriptor desc; if (LIBUSB_SUCCESS != libusb_open(usb, &h)) return; if (libusb_get_device_descriptor(usb, &desc)) { libusb_close(h); return; } if ((!cgpu->dev_manufacturer) && libusb_get_string_descriptor_ascii(h, desc.iManufacturer, buf, sizeof(buf)) >= 0) cgpu->dev_manufacturer = strdup((void *)buf); if ((!cgpu->dev_product) && libusb_get_string_descriptor_ascii(h, desc.iProduct, buf, sizeof(buf)) >= 0) cgpu->dev_product = strdup((void *)buf); if ((!cgpu->dev_serial) && libusb_get_string_descriptor_ascii(h, desc.iSerialNumber, buf, sizeof(buf)) >= 0) cgpu->dev_serial = strdup((void *)buf); libusb_close(h); } #endif struct lowl_usb_endpoint { struct libusb_device_handle *devh; unsigned char endpoint_r; int packetsz_r; bytes_t _buf_r; unsigned timeout_ms_r; unsigned char endpoint_w; int packetsz_w; unsigned timeout_ms_w; }; struct lowl_usb_endpoint *usb_open_ep(struct libusb_device_handle * const devh, const uint8_t epid, const int pktsz) { struct lowl_usb_endpoint * const ep = malloc(sizeof(*ep)); ep->devh = devh; if (epid & 0x80) { // Read endpoint ep->endpoint_r = epid; ep->packetsz_r = pktsz; bytes_init(&ep->_buf_r); } else { // Write endpoint ep->endpoint_w = epid; ep->packetsz_w = epid; ep->packetsz_r = -1; } return ep; }; struct lowl_usb_endpoint *usb_open_ep_pair(struct libusb_device_handle * const devh, const uint8_t epid_r, const int pktsz_r, const uint8_t epid_w, const int pktsz_w) { struct lowl_usb_endpoint * const ep = malloc(sizeof(*ep)); *ep = (struct lowl_usb_endpoint){ .devh = devh, .endpoint_r = epid_r, .packetsz_r = pktsz_r, ._buf_r = BYTES_INIT, .endpoint_w = epid_w, .packetsz_w = pktsz_w, }; return ep; } void usb_ep_set_timeouts_ms(struct lowl_usb_endpoint * const ep, const unsigned timeout_ms_r, const unsigned timeout_ms_w) { ep->timeout_ms_r = timeout_ms_r; ep->timeout_ms_w = timeout_ms_w; } ssize_t usb_read(struct lowl_usb_endpoint * const ep, void * const data, size_t datasz) { unsigned timeout; size_t xfer; if ( (xfer = bytes_len(&ep->_buf_r)) < datasz) { bytes_extend_buf(&ep->_buf_r, datasz + ep->packetsz_r - 1); unsigned char *p = &bytes_buf(&ep->_buf_r)[xfer]; int pxfer; int rem = datasz - xfer, rsz; timeout = xfer ? 
0 : ep->timeout_ms_r; while (rem > 0) { rsz = (rem / ep->packetsz_r) * ep->packetsz_r; if (rsz < rem) rsz += ep->packetsz_r; switch (libusb_bulk_transfer(ep->devh, ep->endpoint_r, p, rsz, &pxfer, timeout)) { case 0: case LIBUSB_ERROR_TIMEOUT: if (!pxfer) // Behaviour is like tcsetattr-style timeout return 0; p += pxfer; rem -= pxfer; // NOTE: Need to maintain _buf_r length so data is saved in case of error xfer += pxfer; bytes_resize(&ep->_buf_r, xfer); break; case LIBUSB_ERROR_PIPE: case LIBUSB_ERROR_NO_DEVICE: errno = EPIPE; return -1; default: errno = EIO; return -1; } timeout = 0; } } memcpy(data, bytes_buf(&ep->_buf_r), datasz); bytes_shift(&ep->_buf_r, datasz); return datasz; } ssize_t usb_write(struct lowl_usb_endpoint * const ep, const void * const data, size_t datasz) { unsigned timeout = ep->timeout_ms_w; unsigned char *p = (void*)data; size_t rem = datasz; int pxfer; while (rem > 0) { switch (libusb_bulk_transfer(ep->devh, ep->endpoint_w, p, rem, &pxfer, timeout)) { case 0: case LIBUSB_ERROR_TIMEOUT: p += pxfer; rem -= pxfer; break; case LIBUSB_ERROR_PIPE: case LIBUSB_ERROR_NO_DEVICE: errno = EPIPE; return (datasz - rem) ?: -1; default: errno = EIO; return (datasz - rem) ?: -1; } timeout = 0; } errno = 0; return datasz; } void usb_close_ep(struct lowl_usb_endpoint * const ep) { if (ep->packetsz_r != -1) bytes_free(&ep->_buf_r); free(ep); } void lowl_usb_close(struct libusb_device_handle * const devh) { libusb_close(devh); } struct lowlevel_driver lowl_usb = { .dname = "usb", .devinfo_scan = usb_devinfo_scan, .devinfo_free = usb_devinfo_free, }; bfgminer-bfgminer-3.10.0/lowl-usb.h000066400000000000000000000022021226556647300171040ustar00rootroot00000000000000#ifndef BFG_LOWL_USB_H #define BFG_LOWL_USB_H #include #include #include extern struct device_drv *bfg_claim_usb(struct device_drv * const, const bool verbose, const uint8_t usbbus, const uint8_t usbaddr); #define bfg_make_devid_libusb(dev) bfg_make_devid_usb(libusb_get_bus_number(dev), libusb_get_device_address(dev)) #define bfg_claim_libusb(api, verbose, dev) bfg_claim_usb(api, verbose, libusb_get_bus_number(dev), libusb_get_device_address(dev)) extern void cgpu_copy_libusb_strings(struct cgpu_info *, libusb_device *); struct lowl_usb_endpoint; extern struct lowl_usb_endpoint *usb_open_ep(struct libusb_device_handle *, uint8_t epid, int pktsz); extern struct lowl_usb_endpoint *usb_open_ep_pair(struct libusb_device_handle *, uint8_t epid_r, int pktsz_r, uint8_t epid_w, int pktsz_w); extern void usb_ep_set_timeouts_ms(struct lowl_usb_endpoint *, unsigned timeout_ms_r, unsigned timeout_ms_w); extern ssize_t usb_read(struct lowl_usb_endpoint *, void *, size_t); extern ssize_t usb_write(struct lowl_usb_endpoint *, const void *, size_t); extern void usb_close_ep(struct lowl_usb_endpoint *); #endif bfgminer-bfgminer-3.10.0/lowl-vcom.c000066400000000000000000000750261226556647300172700ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2013 Con Kolivas * Copyright 2012 Andrew Smith * Copyright 2013 Xiangfu * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_FILE_H #include #endif #ifdef HAVE_LIBUSB #include #endif #include "miner.h" #ifndef WIN32 #include #include #include #include #include #include #ifndef O_CLOEXEC #define O_CLOEXEC 0 #endif #else /* WIN32 */ #include #ifdef HAVE_WIN_DDKUSB #include #include #include #endif #include #include #define dlsym (void*)GetProcAddress #define dlclose FreeLibrary typedef unsigned long FT_STATUS; typedef PVOID FT_HANDLE; __stdcall FT_STATUS (*FT_ListDevices)(PVOID pArg1, PVOID pArg2, DWORD Flags); __stdcall FT_STATUS (*FT_Open)(int idx, FT_HANDLE*); __stdcall FT_STATUS (*FT_GetComPortNumber)(FT_HANDLE, LPLONG lplComPortNumber); __stdcall FT_STATUS (*FT_Close)(FT_HANDLE); const uint32_t FT_OPEN_BY_SERIAL_NUMBER = 1; const uint32_t FT_OPEN_BY_DESCRIPTION = 2; const uint32_t FT_LIST_ALL = 0x20000000; const uint32_t FT_LIST_BY_INDEX = 0x40000000; const uint32_t FT_LIST_NUMBER_ONLY = 0x80000000; enum { FT_OK, }; #endif /* WIN32 */ #ifdef HAVE_LIBUDEV #include #include #endif #include "logging.h" #include "lowlevel.h" #include "miner.h" #include "util.h" #include "lowl-vcom.h" struct lowlevel_driver lowl_vcom; struct detectone_meta_info_t detectone_meta_info; void clear_detectone_meta_info(void) { detectone_meta_info = (struct detectone_meta_info_t){ .manufacturer = NULL, }; } #define _vcom_unique_id(devpath) devpath_to_devid(devpath) struct lowlevel_device_info *_vcom_devinfo_findorcreate(struct lowlevel_device_info ** const devinfo_list, const char * const devpath) { struct lowlevel_device_info *devinfo; char * const devid = _vcom_unique_id(devpath); if (!devid) return NULL; HASH_FIND_STR(*devinfo_list, devid, devinfo); if (!devinfo) { devinfo = malloc(sizeof(*devinfo)); *devinfo = (struct lowlevel_device_info){ .lowl = &lowl_vcom, .path = strdup(devpath), .devid = devid, }; HASH_ADD_KEYPTR(hh, *devinfo_list, devinfo->devid, strlen(devid), devinfo); } else free(devid); return devinfo; } #ifdef HAVE_LIBUDEV static void _decode_udev_enc(char *o, const char *s) { while(s[0]) { if (s[0] == '\\' && s[1] == 'x' && s[2] && s[3]) { hex2bin((void*)(o++), &s[2], 1); s += 4; } else (o++)[0] = (s++)[0]; } o[0] = '\0'; } static char *_decode_udev_enc_dup(const char *s) { if (!s) return NULL; char *o = malloc(strlen(s) + 1); if (!o) { applog(LOG_ERR, "Failed to malloc in _decode_udev_enc_dup"); return NULL; } _decode_udev_enc(o, s); return o; } static void _vcom_devinfo_scan_udev(struct lowlevel_device_info ** const devinfo_list) { struct udev *udev = udev_new(); struct udev_enumerate *enumerate = udev_enumerate_new(udev); struct udev_list_entry *list_entry; struct lowlevel_device_info *devinfo; udev_enumerate_add_match_subsystem(enumerate, "tty"); udev_enumerate_add_match_property(enumerate, "ID_SERIAL", "*"); udev_enumerate_scan_devices(enumerate); udev_list_entry_foreach(list_entry, udev_enumerate_get_list_entry(enumerate)) { struct udev_device *device = udev_device_new_from_syspath( udev_enumerate_get_udev(enumerate), udev_list_entry_get_name(list_entry) ); if (!device) continue; const char * const devpath = udev_device_get_devnode(device); devinfo = _vcom_devinfo_findorcreate(devinfo_list, devpath); BFGINIT(devinfo->manufacturer, _decode_udev_enc_dup(udev_device_get_property_value(device, "ID_VENDOR_ENC"))); BFGINIT(devinfo->product, _decode_udev_enc_dup(udev_device_get_property_value(device, "ID_MODEL_ENC"))); BFGINIT(devinfo->serial, _decode_udev_enc_dup(udev_device_get_property_value(device, 
"ID_SERIAL_SHORT"))); udev_device_unref(device); } udev_enumerate_unref(enumerate); udev_unref(udev); } #endif #ifndef WIN32 static void _vcom_devinfo_scan_devserial(struct lowlevel_device_info ** const devinfo_list) { DIR *D; struct dirent *de; const char udevdir[] = "/dev/serial/by-id"; char devpath[sizeof(udevdir) + 1 + NAME_MAX]; char *devfile = devpath + sizeof(udevdir); struct lowlevel_device_info *devinfo; D = opendir(udevdir); if (!D) return; memcpy(devpath, udevdir, sizeof(udevdir) - 1); devpath[sizeof(udevdir) - 1] = '/'; while ( (de = readdir(D)) ) { if (strncmp(de->d_name, "usb-", 4)) continue; strcpy(devfile, de->d_name); devinfo = _vcom_devinfo_findorcreate(devinfo_list, devpath); if (devinfo && !(devinfo->manufacturer || devinfo->product || devinfo->serial)) devinfo->product = strdup(devfile); } closedir(D); } #endif #ifndef WIN32 static char *_sysfs_do_read(const char *devpath, char *devfile, const char *append) { char buf[0x40]; FILE *F; strcpy(devfile, append); F = fopen(devpath, "r"); if (F) { if (fgets(buf, sizeof(buf), F)) { size_t L = strlen(buf); while (isCspace(buf[--L])) buf[L] = '\0'; } else buf[0] = '\0'; fclose(F); } else buf[0] = '\0'; return buf[0] ? strdup(buf) : NULL; } static void _sysfs_find_tty(char *devpath, char *devfile, struct lowlevel_device_info ** const devinfo_list) { struct lowlevel_device_info *devinfo; DIR *DT; struct dirent *de; char ttybuf[0x10] = "/dev/"; char *mydevfile = strdup(devfile); DT = opendir(devpath); if (!DT) goto out; while ( (de = readdir(DT)) ) { if (strncmp(de->d_name, "tty", 3)) continue; if (!de->d_name[3]) { // "tty" directory: recurse (needed for ttyACM) sprintf(devfile, "%s/tty", mydevfile); _sysfs_find_tty(devpath, devfile, devinfo_list); continue; } if (strncmp(&de->d_name[3], "USB", 3) && strncmp(&de->d_name[3], "ACM", 3)) continue; strcpy(&ttybuf[5], de->d_name); devinfo = _vcom_devinfo_findorcreate(devinfo_list, ttybuf); if (!devinfo) continue; BFGINIT(devinfo->manufacturer, _sysfs_do_read(devpath, devfile, "/manufacturer")); BFGINIT(devinfo->product, _sysfs_do_read(devpath, devfile, "/product")); BFGINIT(devinfo->serial, _sysfs_do_read(devpath, devfile, "/serial")); } closedir(DT); out: free(mydevfile); } static void _vcom_devinfo_scan_sysfs(struct lowlevel_device_info ** const devinfo_list) { DIR *D, *DS; struct dirent *de; const char devroot[] = "/sys/bus/usb/devices"; const size_t devrootlen = sizeof(devroot) - 1; char devpath[sizeof(devroot) + (NAME_MAX * 3)]; char *devfile, *upfile; size_t len, len2; D = opendir(devroot); if (!D) return; memcpy(devpath, devroot, devrootlen); devpath[devrootlen] = '/'; while ( (de = readdir(D)) ) { len = strlen(de->d_name); upfile = &devpath[devrootlen + 1]; memcpy(upfile, de->d_name, len); devfile = upfile + len; devfile[0] = '\0'; DS = opendir(devpath); if (!DS) continue; devfile[0] = '/'; ++devfile; while ( (de = readdir(DS)) ) { if (strncmp(de->d_name, upfile, len)) continue; len2 = strlen(de->d_name); memcpy(devfile, de->d_name, len2 + 1); _sysfs_find_tty(devpath, devfile, devinfo_list); } closedir(DS); } closedir(D); } #endif #ifdef HAVE_WIN_DDKUSB static const GUID WIN_GUID_DEVINTERFACE_USB_HOST_CONTROLLER = { 0x3ABF6F2D, 0x71C4, 0x462A, {0x8A, 0x92, 0x1E, 0x68, 0x61, 0xE6, 0xAF, 0x27} }; static char *windows_usb_get_port_path(HANDLE hubh, const int portno) { size_t namesz; ULONG rsz; { USB_NODE_CONNECTION_NAME pathinfo = { .ConnectionIndex = portno, }; if (!(DeviceIoControl(hubh, IOCTL_USB_GET_NODE_CONNECTION_NAME, &pathinfo, sizeof(pathinfo), &pathinfo, 
sizeof(pathinfo), &rsz, NULL) && rsz >= sizeof(pathinfo))) applogfailinfor(NULL, LOG_ERR, "ioctl (1)", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); namesz = pathinfo.ActualLength; } const size_t bufsz = sizeof(USB_NODE_CONNECTION_NAME) + namesz; uint8_t buf[bufsz]; USB_NODE_CONNECTION_NAME *path = (USB_NODE_CONNECTION_NAME *)buf; *path = (USB_NODE_CONNECTION_NAME){ .ConnectionIndex = portno, }; if (!(DeviceIoControl(hubh, IOCTL_USB_GET_NODE_CONNECTION_NAME, path, bufsz, path, bufsz, &rsz, NULL) && rsz >= sizeof(*path))) applogfailinfor(NULL, LOG_ERR, "ioctl (2)", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); return ucs2_to_utf8_dup(path->NodeName, path->ActualLength); } static char *windows_usb_get_string(HANDLE hubh, const int portno, const uint8_t descid) { if (!descid) return NULL; const size_t descsz_max = sizeof(USB_STRING_DESCRIPTOR) + MAXIMUM_USB_STRING_LENGTH; const size_t reqsz = sizeof(USB_DESCRIPTOR_REQUEST) + descsz_max; uint8_t buf[reqsz]; USB_DESCRIPTOR_REQUEST * const req = (USB_DESCRIPTOR_REQUEST *)buf; USB_STRING_DESCRIPTOR * const desc = (USB_STRING_DESCRIPTOR *)&req[1]; *req = (USB_DESCRIPTOR_REQUEST){ .ConnectionIndex = portno, .SetupPacket = { .wValue = (USB_STRING_DESCRIPTOR_TYPE << 8) | descid, .wIndex = 0, .wLength = descsz_max, }, }; // Need to explicitly zero the output memory memset(desc, '\0', descsz_max); ULONG descsz; if (!DeviceIoControl(hubh, IOCTL_USB_GET_DESCRIPTOR_FROM_NODE_CONNECTION, req, reqsz, req, reqsz, &descsz, NULL)) applogfailinfor(NULL, LOG_DEBUG, "ioctl", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); if (descsz < 2 || desc->bDescriptorType != USB_STRING_DESCRIPTOR_TYPE || desc->bLength > descsz - sizeof(USB_DESCRIPTOR_REQUEST) || desc->bLength % 2) applogfailr(NULL, LOG_ERR, "sanity check"); return ucs2_to_utf8_dup(desc->bString, desc->bLength); } static void _vcom_devinfo_scan_windows__hub(struct lowlevel_device_info **, const char *); static void _vcom_devinfo_scan_windows__hubport(struct lowlevel_device_info ** const devinfo_list, HANDLE hubh, const int portno) { struct lowlevel_device_info *devinfo; const size_t conninfosz = sizeof(USB_NODE_CONNECTION_INFORMATION) + (sizeof(USB_PIPE_INFO) * 30); uint8_t buf[conninfosz]; USB_NODE_CONNECTION_INFORMATION * const conninfo = (USB_NODE_CONNECTION_INFORMATION *)buf; conninfo->ConnectionIndex = portno; ULONG respsz; if (!DeviceIoControl(hubh, IOCTL_USB_GET_NODE_CONNECTION_INFORMATION, conninfo, conninfosz, conninfo, conninfosz, &respsz, NULL)) applogfailinfor(, LOG_ERR, "ioctl", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); if (conninfo->ConnectionStatus != DeviceConnected) return; if (conninfo->DeviceIsHub) { const char * const hubpath = windows_usb_get_port_path(hubh, portno); if (hubpath) _vcom_devinfo_scan_windows__hub(devinfo_list, hubpath); return; } const USB_DEVICE_DESCRIPTOR * const devdesc = &conninfo->DeviceDescriptor; char * const serial = windows_usb_get_string(hubh, portno, devdesc->iSerialNumber); if (!serial) { out: free(serial); return; } const size_t slen = strlen(serial); char subkey[52 + slen + 18 + 1]; sprintf(subkey, "SYSTEM\\CurrentControlSet\\Enum\\USB\\VID_%04x&PID_%04x\\%s\\Device Parameters", (unsigned)devdesc->idVendor, (unsigned)devdesc->idProduct, serial); HKEY hkey; int e; if (ERROR_SUCCESS != (e = RegOpenKey(HKEY_LOCAL_MACHINE, subkey, &hkey))) { applogfailinfo(LOG_ERR, "open Device Parameters registry key", "%s", bfg_strerror(e, BST_SYSTEM)); goto out; } char devpath[0x10] = "\\\\.\\"; DWORD type, sz = sizeof(devpath) - 4; if (ERROR_SUCCESS != (e = 
RegQueryValueExA(hkey, "PortName", NULL, &type, (LPBYTE)&devpath[4], &sz))) { applogfailinfo(LOG_DEBUG, "get PortName registry key value", "%s", bfg_strerror(e, BST_SYSTEM)); RegCloseKey(hkey); goto out; } RegCloseKey(hkey); if (type != REG_SZ) { applogfailinfor(, LOG_ERR, "get expected type for PortName registry key value", "%ld", (long)type); goto out; } devinfo = _vcom_devinfo_findorcreate(devinfo_list, devpath); if (!devinfo) { free(serial); return; } BFGINIT(devinfo->manufacturer, windows_usb_get_string(hubh, portno, devdesc->iManufacturer)); BFGINIT(devinfo->product, windows_usb_get_string(hubh, portno, devdesc->iProduct)); if (devinfo->serial) free(serial); else devinfo->serial = serial; } static void _vcom_devinfo_scan_windows__hub(struct lowlevel_device_info ** const devinfo_list, const char * const hubpath) { HANDLE hubh; USB_NODE_INFORMATION nodeinfo; { char deviceName[4 + strlen(hubpath) + 1]; sprintf(deviceName, "\\\\.\\%s", hubpath); hubh = CreateFile(deviceName, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); if (hubh == INVALID_HANDLE_VALUE) applogr(, LOG_ERR, "Error opening USB hub device %s for autodetect: %s", deviceName, bfg_strerror(GetLastError(), BST_SYSTEM)); } ULONG nBytes; if (!DeviceIoControl(hubh, IOCTL_USB_GET_NODE_INFORMATION, &nodeinfo, sizeof(nodeinfo), &nodeinfo, sizeof(nodeinfo), &nBytes, NULL)) applogfailinfor(, LOG_ERR, "ioctl", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); const int portcount = nodeinfo.u.HubInformation.HubDescriptor.bNumberOfPorts; for (int i = 1; i <= portcount; ++i) _vcom_devinfo_scan_windows__hubport(devinfo_list, hubh, i); CloseHandle(hubh); } static char *windows_usb_get_root_hub_path(HANDLE hcntlrh) { size_t namesz; ULONG rsz; { USB_ROOT_HUB_NAME pathinfo; if (!DeviceIoControl(hcntlrh, IOCTL_USB_GET_ROOT_HUB_NAME, 0, 0, &pathinfo, sizeof(pathinfo), &rsz, NULL)) applogfailinfor(NULL, LOG_ERR, "ioctl (1)", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); if (rsz < sizeof(pathinfo)) applogfailinfor(NULL, LOG_ERR, "ioctl (1)", "Size too small (%d < %d)", (int)rsz, (int)sizeof(pathinfo)); namesz = pathinfo.ActualLength; } const size_t bufsz = sizeof(USB_ROOT_HUB_NAME) + namesz; uint8_t buf[bufsz]; USB_ROOT_HUB_NAME *hubpath = (USB_ROOT_HUB_NAME *)buf; if (!(DeviceIoControl(hcntlrh, IOCTL_USB_GET_ROOT_HUB_NAME, NULL, 0, hubpath, bufsz, &rsz, NULL) && rsz >= sizeof(*hubpath))) applogfailinfor(NULL, LOG_ERR, "ioctl (2)", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); return ucs2_to_utf8_dup(hubpath->RootHubName, hubpath->ActualLength); } static void _vcom_devinfo_scan_windows__hcntlr(struct lowlevel_device_info ** const devinfo_list, HDEVINFO *devinfo, const int i) { SP_DEVICE_INTERFACE_DATA devifacedata = { .cbSize = sizeof(devifacedata), }; if (!SetupDiEnumDeviceInterfaces(*devinfo, 0, (LPGUID)&WIN_GUID_DEVINTERFACE_USB_HOST_CONTROLLER, i, &devifacedata)) applogfailinfor(, LOG_ERR, "SetupDiEnumDeviceInterfaces", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); DWORD detailsz; if (!(!SetupDiGetDeviceInterfaceDetail(*devinfo, &devifacedata, NULL, 0, &detailsz, NULL) && GetLastError() == ERROR_INSUFFICIENT_BUFFER)) applogfailinfor(, LOG_ERR, "SetupDiEnumDeviceInterfaceDetail (1)", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); PSP_DEVICE_INTERFACE_DETAIL_DATA detail = alloca(detailsz); detail->cbSize = sizeof(*detail); if (!SetupDiGetDeviceInterfaceDetail(*devinfo, &devifacedata, detail, detailsz, &detailsz, NULL)) applogfailinfor(, LOG_ERR, "SetupDiEnumDeviceInterfaceDetail (2)", "%s", bfg_strerror(GetLastError(), 
BST_SYSTEM)); HANDLE hcntlrh = CreateFile(detail->DevicePath, GENERIC_WRITE, FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL); if (hcntlrh == INVALID_HANDLE_VALUE) applogfailinfor(, LOG_DEBUG, "open USB host controller device", "%s", bfg_strerror(GetLastError(), BST_SYSTEM)); char * const hubpath = windows_usb_get_root_hub_path(hcntlrh); CloseHandle(hcntlrh); if (unlikely(!hubpath)) return; _vcom_devinfo_scan_windows__hub(devinfo_list, hubpath); free(hubpath); } static void _vcom_devinfo_scan_windows(struct lowlevel_device_info ** const devinfo_list) { HDEVINFO devinfo; devinfo = SetupDiGetClassDevs(&WIN_GUID_DEVINTERFACE_USB_HOST_CONTROLLER, NULL, NULL, (DIGCF_PRESENT | DIGCF_DEVICEINTERFACE)); SP_DEVINFO_DATA devinfodata = { .cbSize = sizeof(devinfodata), }; for (int i = 0; SetupDiEnumDeviceInfo(devinfo, i, &devinfodata); ++i) _vcom_devinfo_scan_windows__hcntlr(devinfo_list, &devinfo, i); SetupDiDestroyDeviceInfoList(devinfo); } #endif #ifdef WIN32 #define LOAD_SYM(sym) do { \ if (!(sym = dlsym(dll, #sym))) { \ applog(LOG_DEBUG, "Failed to load " #sym ", not using FTDI autodetect"); \ goto out; \ } \ } while(0) static char *_ftdi_get_string(char *buf, intptr_t i, DWORD flags) { if (FT_OK != FT_ListDevices((PVOID)i, buf, FT_LIST_BY_INDEX | flags)) return NULL; return buf[0] ? buf : NULL; } static void _vcom_devinfo_scan_ftdi(struct lowlevel_device_info ** const devinfo_list) { char devpath[] = "\\\\.\\COMnnnnn"; char *devpathnum = &devpath[7]; char **bufptrs; char *buf; char serial[64]; struct lowlevel_device_info *devinfo; DWORD i; FT_STATUS ftStatus; DWORD numDevs; HMODULE dll = LoadLibrary("FTD2XX.DLL"); if (!dll) { applog(LOG_DEBUG, "FTD2XX.DLL failed to load, not using FTDI autodetect"); return; } LOAD_SYM(FT_ListDevices); LOAD_SYM(FT_Open); LOAD_SYM(FT_GetComPortNumber); LOAD_SYM(FT_Close); ftStatus = FT_ListDevices(&numDevs, NULL, FT_LIST_NUMBER_ONLY); if (ftStatus != FT_OK) { applog(LOG_DEBUG, "FTDI device count failed, not using FTDI autodetect"); goto out; } applog(LOG_DEBUG, "FTDI reports %u devices", (unsigned)numDevs); buf = alloca(65 * numDevs); bufptrs = alloca(sizeof(*bufptrs) * (numDevs + 1)); for (i = 0; i < numDevs; ++i) bufptrs[i] = &buf[i * 65]; bufptrs[numDevs] = NULL; ftStatus = FT_ListDevices(bufptrs, &numDevs, FT_LIST_ALL | FT_OPEN_BY_DESCRIPTION); if (ftStatus != FT_OK) { applog(LOG_DEBUG, "FTDI device list failed, not using FTDI autodetect"); goto out; } for (i = numDevs; i > 0; ) { --i; bufptrs[i][64] = '\0'; FT_HANDLE ftHandle; if (FT_OK != FT_Open(i, &ftHandle)) continue; LONG lComPortNumber; ftStatus = FT_GetComPortNumber(ftHandle, &lComPortNumber); FT_Close(ftHandle); if (FT_OK != ftStatus || lComPortNumber < 0) continue; applog(LOG_ERR, "FT_GetComPortNumber(%p (%ld), %ld)", ftHandle, (long)i, (long)lComPortNumber); sprintf(devpathnum, "%d", (int)lComPortNumber); devinfo = _vcom_devinfo_findorcreate(devinfo_list, devpath); if (!devinfo) continue; BFGINIT(devinfo->product, (bufptrs[i] && bufptrs[i][0]) ? 
strdup(bufptrs[i]) : NULL); BFGINIT(devinfo->serial, maybe_strdup(_ftdi_get_string(serial, i, FT_OPEN_BY_SERIAL_NUMBER))); } out: dlclose(dll); } #endif #ifdef WIN32 extern void _vcom_devinfo_scan_querydosdevice(struct lowlevel_device_info **); #else extern void _vcom_devinfo_scan_lsdev(struct lowlevel_device_info **); #endif void _vcom_devinfo_scan_user(struct lowlevel_device_info ** const devinfo_list) { struct string_elist *sd_iter, *sd_tmp; DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp) { const char * const dname = sd_iter->string; const char * const colon = strpbrk(dname, ":@"); const char *dev; if (!(colon && colon != dname)) dev = dname; else dev = &colon[1]; if (!access(dev, F_OK)) _vcom_devinfo_findorcreate(devinfo_list, dev); } } extern bool lowl_usb_attach_kernel_driver(const struct lowlevel_device_info *); bool vcom_lowl_probe_wrapper(const struct lowlevel_device_info * const info, detectone_func_t detectone) { if (info->lowl != &lowl_vcom) { #ifdef HAVE_LIBUSB if (info->lowl == &lowl_usb) { if (lowl_usb_attach_kernel_driver(info)) bfg_need_detect_rescan = true; } #endif return false; } detectone_meta_info = (struct detectone_meta_info_t){ .manufacturer = info->manufacturer, .product = info->product, .serial = info->serial, }; const bool rv = detectone(info->path); clear_detectone_meta_info(); return rv; } bool _serial_autodetect_found_cb(struct lowlevel_device_info * const devinfo, void *userp) { detectone_func_t detectone = userp; if (bfg_claim_any(NULL, devinfo->path, devinfo->devid)) { applog(LOG_DEBUG, "%s (%s) is already claimed, skipping probe", devinfo->path, devinfo->devid); return false; } if (devinfo->lowl != &lowl_vcom) { #ifdef HAVE_LIBUSB if (devinfo->lowl == &lowl_usb) { if (lowl_usb_attach_kernel_driver(devinfo)) bfg_need_detect_rescan = true; } else #endif applog(LOG_WARNING, "Non-VCOM %s (%s) matched", devinfo->path, devinfo->devid); return false; } detectone_meta_info = (struct detectone_meta_info_t){ .manufacturer = devinfo->manufacturer, .product = devinfo->product, .serial = devinfo->serial, }; const bool rv = detectone(devinfo->path); clear_detectone_meta_info(); return rv; } int _serial_autodetect(detectone_func_t detectone, ...) 
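/* Usage sketch (hypothetical driver, assumed names): a serial driver passes
 * its detect-one callback plus one or more product-name needles; the
 * serial_autodetect() macro in lowl-vcom.h NULL-terminates the needle list
 * before calling this function.  Each needle must occur as a substring of
 * the device's reported product string (see _lowlevel_match_product() in
 * lowlevel.c) before the callback is invoked with the VCOM path:
 *
 *   static bool mydev_detect_one(const char * const devpath)
 *   {
 *       // open devpath with serial_open(), probe the protocol, and
 *       // return true only if a supported device answered
 *       return false;
 *   }
 *
 *   static int mydev_detect_auto(void)
 *   {
 *       return serial_autodetect(mydev_detect_one, "MyMiner");
 *   }
 */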
{ va_list needles; char *needles_array[0x10]; int needlecount = 0; va_start(needles, detectone); while ( (needles_array[needlecount++] = va_arg(needles, void *)) ) {} va_end(needles); return _lowlevel_detect(_serial_autodetect_found_cb, NULL, (const char **)needles_array, detectone); } static struct lowlevel_device_info *vcom_devinfo_scan() { struct lowlevel_device_info *devinfo_hash = NULL; struct lowlevel_device_info *devinfo_list = NULL; struct lowlevel_device_info *devinfo, *tmp; // All 3 USB Strings available: #ifndef WIN32 _vcom_devinfo_scan_sysfs(&devinfo_hash); #endif #ifdef HAVE_WIN_DDKUSB _vcom_devinfo_scan_windows(&devinfo_hash); #endif #ifdef HAVE_LIBUDEV _vcom_devinfo_scan_udev(&devinfo_hash); #endif // Missing Manufacturer: #ifdef WIN32 _vcom_devinfo_scan_ftdi(&devinfo_hash); #endif // All blobbed together: #ifndef WIN32 _vcom_devinfo_scan_devserial(&devinfo_hash); #endif // No info: #ifdef WIN32 _vcom_devinfo_scan_querydosdevice(&devinfo_hash); #else _vcom_devinfo_scan_lsdev(&devinfo_hash); #endif _vcom_devinfo_scan_user(&devinfo_hash); // Convert hash to simple list HASH_ITER(hh, devinfo_hash, devinfo, tmp) { LL_PREPEND(devinfo_list, devinfo); } HASH_CLEAR(hh, devinfo_hash); return devinfo_list; } struct device_drv *bfg_claim_serial(struct device_drv * const api, const bool verbose, const char * const devpath) { char * const devs = _vcom_unique_id(devpath); if (!devs) return false; struct device_drv * const rv = bfg_claim_any(api, (verbose ? devpath : NULL), devs); free(devs); return rv; } // This code is purely for debugging but is very useful for that // It also took quite a bit of effort so I left it in // #define TERMIOS_DEBUG 1 // Here to include it at compile time // It's off by default #ifndef WIN32 #ifdef TERMIOS_DEBUG #define BITSSET "Y" #define BITSNOTSET "N" int tiospeed(speed_t speed) { switch (speed) { #define IOSPEED(baud) \ case B ## baud: \ return baud; \ // END #include "iospeeds_local.h" #undef IOSPEED default: return -1; } } void termios_debug(const char *devpath, struct termios *my_termios, const char *msg) { applog(LOG_DEBUG, "TIOS: Open %s attributes %s: ispeed=%d ospeed=%d", devpath, msg, tiospeed(cfgetispeed(my_termios)), tiospeed(cfgetispeed(my_termios))); #define ISSETI(b) ((my_termios->c_iflag | (b)) ? BITSSET : BITSNOTSET) applog(LOG_DEBUG, "TIOS: c_iflag: IGNBRK=%s BRKINT=%s IGNPAR=%s PARMRK=%s INPCK=%s ISTRIP=%s INLCR=%s IGNCR=%s ICRNL=%s IUCLC=%s IXON=%s IXANY=%s IOFF=%s IMAXBEL=%s IUTF8=%s", ISSETI(IGNBRK), ISSETI(BRKINT), ISSETI(IGNPAR), ISSETI(PARMRK), ISSETI(INPCK), ISSETI(ISTRIP), ISSETI(INLCR), ISSETI(IGNCR), ISSETI(ICRNL), ISSETI(IUCLC), ISSETI(IXON), ISSETI(IXANY), ISSETI(IXOFF), ISSETI(IMAXBEL), ISSETI(IUTF8)); #define ISSETO(b) ((my_termios->c_oflag | (b)) ? BITSSET : BITSNOTSET) #define VALO(b) (my_termios->c_oflag | (b)) applog(LOG_DEBUG, "TIOS: c_oflag: OPOST=%s OLCUC=%s ONLCR=%s OCRNL=%s ONOCR=%s ONLRET=%s OFILL=%s OFDEL=%s NLDLY=%d CRDLY=%d TABDLY=%d BSDLY=%d VTDLY=%d FFDLY=%d", ISSETO(OPOST), ISSETO(OLCUC), ISSETO(ONLCR), ISSETO(OCRNL), ISSETO(ONOCR), ISSETO(ONLRET), ISSETO(OFILL), ISSETO(OFDEL), VALO(NLDLY), VALO(CRDLY), VALO(TABDLY), VALO(BSDLY), VALO(VTDLY), VALO(FFDLY)); #define ISSETC(b) ((my_termios->c_cflag | (b)) ? 
BITSSET : BITSNOTSET) #define VALC(b) (my_termios->c_cflag | (b)) applog(LOG_DEBUG, "TIOS: c_cflag: CBAUDEX=%s CSIZE=%d CSTOPB=%s CREAD=%s PARENB=%s PARODD=%s HUPCL=%s CLOCAL=%s" #ifdef LOBLK " LOBLK=%s" #endif " CMSPAR=%s CRTSCTS=%s", ISSETC(CBAUDEX), VALC(CSIZE), ISSETC(CSTOPB), ISSETC(CREAD), ISSETC(PARENB), ISSETC(PARODD), ISSETC(HUPCL), ISSETC(CLOCAL), #ifdef LOBLK ISSETC(LOBLK), #endif ISSETC(CMSPAR), ISSETC(CRTSCTS)); #define ISSETL(b) ((my_termios->c_lflag | (b)) ? BITSSET : BITSNOTSET) applog(LOG_DEBUG, "TIOS: c_lflag: ISIG=%s ICANON=%s XCASE=%s ECHO=%s ECHOE=%s ECHOK=%s ECHONL=%s ECHOCTL=%s ECHOPRT=%s ECHOKE=%s" #ifdef DEFECHO " DEFECHO=%s" #endif " FLUSHO=%s NOFLSH=%s TOSTOP=%s PENDIN=%s IEXTEN=%s", ISSETL(ISIG), ISSETL(ICANON), ISSETL(XCASE), ISSETL(ECHO), ISSETL(ECHOE), ISSETL(ECHOK), ISSETL(ECHONL), ISSETL(ECHOCTL), ISSETL(ECHOPRT), ISSETL(ECHOKE), #ifdef DEFECHO ISSETL(DEFECHO), #endif ISSETL(FLUSHO), ISSETL(NOFLSH), ISSETL(TOSTOP), ISSETL(PENDIN), ISSETL(IEXTEN)); #define VALCC(b) (my_termios->c_cc[b]) applog(LOG_DEBUG, "TIOS: c_cc: VINTR=0x%02x VQUIT=0x%02x VERASE=0x%02x VKILL=0x%02x VEOF=0x%02x VMIN=%u VEOL=0x%02x VTIME=%u VEOL2=0x%02x" #ifdef VSWTCH " VSWTCH=0x%02x" #endif " VSTART=0x%02x VSTOP=0x%02x VSUSP=0x%02x" #ifdef VDSUSP " VDSUSP=0x%02x" #endif " VLNEXT=0x%02x VWERASE=0x%02x VREPRINT=0x%02x VDISCARD=0x%02x" #ifdef VSTATUS " VSTATUS=0x%02x" #endif , VALCC(VINTR), VALCC(VQUIT), VALCC(VERASE), VALCC(VKILL), VALCC(VEOF), VALCC(VMIN), VALCC(VEOL), VALCC(VTIME), VALCC(VEOL2), #ifdef VSWTCH VALCC(VSWTCH), #endif VALCC(VSTART), VALCC(VSTOP), VALCC(VSUSP), #ifdef VDSUSP VALCC(VDSUSP), #endif VALCC(VLNEXT), VALCC(VWERASE), VALCC(VREPRINT), VALCC(VDISCARD) #ifdef VSTATUS ,VALCC(VSTATUS) #endif ); } #endif /* TERMIOS_DEBUG */ speed_t tiospeed_t(int baud) { switch (baud) { #define IOSPEED(baud) \ case baud: \ return B ## baud; \ // END #include "iospeeds_local.h" #undef IOSPEED default: return B0; } } #endif /* WIN32 */ bool valid_baud(int baud) { switch (baud) { #define IOSPEED(baud) \ case baud: \ return true; \ // END #include "iospeeds_local.h" #undef IOSPEED default: return false; } } /* NOTE: Linux only supports uint8_t (decisecond) timeouts; limiting it in * this interface buys us warnings when bad constants are passed in. 
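 *
 * A rough usage sketch (device path and baud rate are assumed here, not
 * taken from any particular driver): a timeout of 10 is about one second on
 * both back-ends, since the value is used directly as termios VTIME
 * (deciseconds) on POSIX and multiplied by 100 to fill the Windows
 * COMMTIMEOUTS structure (milliseconds) below:
 *
 *   int fd = serial_open("/dev/ttyUSB0", 115200, 10, true);
 *   if (fd >= 0) {
 *       uint8_t buf[64];
 *       // serial_read() (see lowl-vcom.h) keeps reading until the buffer
 *       // fills or a read times out with no data
 *       ssize_t got = serial_read(fd, buf, sizeof(buf));
 *       serial_close(fd);
 *   }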
*/ int serial_open(const char *devpath, unsigned long baud, uint8_t timeout, bool purge) { #ifdef WIN32 HANDLE hSerial = CreateFile(devpath, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, 0, NULL); if (unlikely(hSerial == INVALID_HANDLE_VALUE)) { DWORD e = GetLastError(); switch (e) { case ERROR_ACCESS_DENIED: applog(LOG_ERR, "Do not have user privileges required to open %s", devpath); break; case ERROR_SHARING_VIOLATION: applog(LOG_ERR, "%s is already in use by another process", devpath); break; default: applog(LOG_DEBUG, "Open %s failed, GetLastError:%u", devpath, (unsigned)e); break; } return -1; } if (baud) { COMMCONFIG comCfg = {0}; comCfg.dwSize = sizeof(COMMCONFIG); comCfg.wVersion = 1; comCfg.dcb.DCBlength = sizeof(DCB); comCfg.dcb.BaudRate = baud; comCfg.dcb.fBinary = 1; comCfg.dcb.fDtrControl = DTR_CONTROL_ENABLE; comCfg.dcb.fRtsControl = RTS_CONTROL_ENABLE; comCfg.dcb.ByteSize = 8; SetCommConfig(hSerial, &comCfg, sizeof(comCfg)); } // Code must specify a valid timeout value (0 means don't timeout) const DWORD ctoms = ((DWORD)timeout * 100); COMMTIMEOUTS cto = {ctoms, 0, ctoms, 0, ctoms}; SetCommTimeouts(hSerial, &cto); if (purge) { PurgeComm(hSerial, PURGE_RXABORT); PurgeComm(hSerial, PURGE_TXABORT); PurgeComm(hSerial, PURGE_RXCLEAR); PurgeComm(hSerial, PURGE_TXCLEAR); } return _open_osfhandle((intptr_t)hSerial, 0); #else int fdDev = open(devpath, O_RDWR | O_CLOEXEC | O_NOCTTY); if (unlikely(fdDev == -1)) { if (errno == EACCES) applog(LOG_ERR, "Do not have user privileges required to open %s", devpath); else applog(LOG_DEBUG, "Open %s failed: %s", devpath, bfg_strerror(errno, BST_ERRNO)); return -1; } #if defined(LOCK_EX) && defined(LOCK_NB) if (likely(!flock(fdDev, LOCK_EX | LOCK_NB))) applog(LOG_DEBUG, "Acquired exclusive advisory lock on %s", devpath); else if (errno == EWOULDBLOCK) { applog(LOG_ERR, "%s is already in use by another process", devpath); close(fdDev); return -1; } else applog(LOG_WARNING, "Failed to acquire exclusive lock on %s: %s (ignoring)", devpath, bfg_strerror(errno, BST_ERRNO)); #endif struct termios my_termios; tcgetattr(fdDev, &my_termios); #ifdef TERMIOS_DEBUG termios_debug(devpath, &my_termios, "before"); #endif if (baud) { speed_t speed = tiospeed_t(baud); if (speed == B0) applog(LOG_WARNING, "Unrecognized baud rate: %lu", baud); else { cfsetispeed(&my_termios, speed); cfsetospeed(&my_termios, speed); } } my_termios.c_cflag &= ~(CSIZE | PARENB); my_termios.c_cflag |= CS8; my_termios.c_cflag |= CREAD; #ifdef USE_AVALON // my_termios.c_cflag |= CRTSCTS; #endif my_termios.c_cflag |= CLOCAL; my_termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); my_termios.c_oflag &= ~OPOST; my_termios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); // Code must specify a valid timeout value (0 means don't timeout) my_termios.c_cc[VTIME] = (cc_t)timeout; my_termios.c_cc[VMIN] = 0; #ifdef TERMIOS_DEBUG termios_debug(devpath, &my_termios, "settings"); #endif tcsetattr(fdDev, TCSANOW, &my_termios); #ifdef TERMIOS_DEBUG tcgetattr(fdDev, &my_termios); termios_debug(devpath, &my_termios, "after"); #endif if (purge) tcflush(fdDev, TCIOFLUSH); return fdDev; #endif } int serial_close(const int fd) { #if defined(LOCK_EX) && defined(LOCK_NB) && defined(LOCK_UN) flock(fd, LOCK_UN); #endif return close(fd); } ssize_t _serial_read(int fd, char *buf, size_t bufsiz, char *eol) { ssize_t len, tlen = 0; while (bufsiz) { len = read(fd, buf, eol ? 
1 : bufsiz); if (len < 1) break; tlen += len; if (eol && *eol == buf[0]) break; buf += len; bufsiz -= len; } return tlen; } #ifndef WIN32 int get_serial_cts(int fd) { int flags; if (!fd) return -1; ioctl(fd, TIOCMGET, &flags); return (flags & TIOCM_CTS) ? 1 : 0; } int set_serial_rts(int fd, int rts) { int flags; if (!fd) return -1; ioctl(fd, TIOCMGET, &flags); if (rts) flags |= TIOCM_RTS; else flags &= ~TIOCM_RTS; ioctl(fd, TIOCMSET, &flags); return flags & TIOCM_CTS; } #else int get_serial_cts(const int fd) { if (!fd) return -1; const HANDLE fh = (HANDLE)_get_osfhandle(fd); if (!fh) return -1; DWORD flags; if (!GetCommModemStatus(fh, &flags)) return -1; return (flags & MS_CTS_ON) ? 1 : 0; } #endif // ! WIN32 struct lowlevel_driver lowl_vcom = { .dname = "vcom", .devinfo_scan = vcom_devinfo_scan, }; bfgminer-bfgminer-3.10.0/lowl-vcom.h000066400000000000000000000025661226556647300172740ustar00rootroot00000000000000#ifndef BFG_LOWL_VCOM_H #define BFG_LOWL_VCOM_H #include #include #include #include #include "deviceapi.h" struct device_drv; struct cgpu_info; struct detectone_meta_info_t { const char *manufacturer; const char *product; const char *serial; }; extern struct detectone_meta_info_t *_detectone_meta_info(); #define detectone_meta_info (*_detectone_meta_info()) extern void clear_detectone_meta_info(void); extern bool vcom_lowl_probe_wrapper(const struct lowlevel_device_info *, detectone_func_t); extern int _serial_autodetect(detectone_func_t, ...); #define serial_autodetect(...) _serial_autodetect(__VA_ARGS__, NULL) extern struct device_drv *bfg_claim_serial(struct device_drv * const, const bool verbose, const char * const devpath); #define serial_claim(devpath, drv) bfg_claim_serial(drv, false, devpath) #define serial_claim_v(devpath, drv) bfg_claim_serial(drv, true , devpath) extern int serial_open(const char *devpath, unsigned long baud, uint8_t timeout, bool purge); extern ssize_t _serial_read(int fd, char *buf, size_t buflen, char *eol); #define serial_read(fd, buf, count) \ _serial_read(fd, (char*)(buf), count, NULL) #define serial_read_line(fd, buf, bufsiz, eol) \ _serial_read(fd, buf, bufsiz, &eol) extern int serial_close(int fd); extern int get_serial_cts(int fd); extern bool valid_baud(int baud); #endif bfgminer-bfgminer-3.10.0/lowlevel.c000066400000000000000000000153201226556647300171710ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #include #include #include #include #include "compat.h" #include "logging.h" #include "lowlevel.h" #include "miner.h" static struct lowlevel_device_info *devinfo_list; #if defined(HAVE_LIBUSB) || defined(NEED_BFG_LOWL_HID) char *bfg_make_devid_usb(const uint8_t usbbus, const uint8_t usbaddr) { char * const devpath = malloc(12); sprintf(devpath, "usb:%03u:%03u", (unsigned)usbbus, (unsigned)usbaddr); return devpath; } #endif void lowlevel_devinfo_semicpy(struct lowlevel_device_info * const dst, const struct lowlevel_device_info * const src) { #define COPYSTR(key) BFGINIT(dst->key, maybe_strdup(src->key)) COPYSTR(manufacturer); COPYSTR(product); COPYSTR(serial); COPYSTR(path); COPYSTR(devid); BFGINIT(dst->vid, src->vid); BFGINIT(dst->pid, src->pid); } void lowlevel_devinfo_free(struct lowlevel_device_info * const info) { if (info->ref--) return; if (info->lowl->devinfo_free) info->lowl->devinfo_free(info); free(info->manufacturer); free(info->product); free(info->serial); free(info->path); free(info->devid); free(info); } struct lowlevel_device_info *lowlevel_ref(const struct lowlevel_device_info * const cinfo) { struct lowlevel_device_info * const info = (void*)cinfo; ++info->ref; return info; } void lowlevel_scan_free() { if (!devinfo_list) return; struct lowlevel_device_info *info, *tmp; struct lowlevel_device_info *info2, *tmp2; LL_FOREACH_SAFE(devinfo_list, info, tmp) { LL_DELETE(devinfo_list, info); LL_FOREACH_SAFE2(info, info2, tmp2, same_devid_next) { LL_DELETE2(info, info2, same_devid_next); lowlevel_devinfo_free(info2); } } } struct lowlevel_device_info *lowlevel_scan() { struct lowlevel_device_info *devinfo_mid_list; lowlevel_scan_free(); #ifdef HAVE_LIBUSB devinfo_mid_list = lowl_usb.devinfo_scan(); LL_CONCAT(devinfo_list, devinfo_mid_list); #endif #ifdef USE_X6500 devinfo_mid_list = lowl_ft232r.devinfo_scan(); LL_CONCAT(devinfo_list, devinfo_mid_list); #endif #ifdef NEED_BFG_LOWL_HID devinfo_mid_list = lowl_hid.devinfo_scan(); LL_CONCAT(devinfo_list, devinfo_mid_list); #endif #ifdef USE_NANOFURY devinfo_mid_list = lowl_mcp2210.devinfo_scan(); LL_CONCAT(devinfo_list, devinfo_mid_list); #endif #ifdef NEED_BFG_LOWL_VCOM devinfo_mid_list = lowl_vcom.devinfo_scan(); LL_CONCAT(devinfo_list, devinfo_mid_list); #endif struct lowlevel_device_info *devinfo_same_prev_ht = NULL, *devinfo_same_list; LL_FOREACH(devinfo_list, devinfo_mid_list) { // Check for devid overlapping, and build a secondary linked list for them, only including the devid in the main list once (high level to low level) HASH_FIND_STR(devinfo_same_prev_ht, devinfo_mid_list->devid, devinfo_same_list); if (devinfo_same_list) { HASH_DEL(devinfo_same_prev_ht, devinfo_same_list); LL_DELETE(devinfo_list, devinfo_same_list); } LL_PREPEND2(devinfo_same_list, devinfo_mid_list, same_devid_next); HASH_ADD_KEYPTR(hh, devinfo_same_prev_ht, devinfo_mid_list->devid, strlen(devinfo_mid_list->devid), devinfo_same_list); applog(LOG_DEBUG, "%s: Found %s device at %s (path=%s, vid=%04x, pid=%04x, manuf=%s, prod=%s, serial=%s)", __func__, devinfo_mid_list->lowl->dname, devinfo_mid_list->devid, devinfo_mid_list->path, (unsigned)devinfo_mid_list->vid, (unsigned)devinfo_mid_list->pid, devinfo_mid_list->manufacturer, devinfo_mid_list->product, devinfo_mid_list->serial); } HASH_CLEAR(hh, devinfo_same_prev_ht); return devinfo_list; } bool _lowlevel_match_product(const struct lowlevel_device_info * const info, const char ** const needles) { if (!info->product) return false; for (int i = 0; needles[i]; ++i) if 
(!strstr(info->product, needles[i])) return false; return true; } bool lowlevel_match_id(const struct lowlevel_device_info * const info, const struct lowlevel_driver * const lowl, const int32_t vid, const int32_t pid) { if (info->lowl != lowl) return false; if (vid != -1 && vid != info->vid) return false; if (pid != -1 && pid != info->pid) return false; return true; } #define DETECT_BEGIN \ struct lowlevel_device_info *info, *tmp; \ int found = 0; \ \ LL_FOREACH_SAFE(devinfo_list, info, tmp) \ { \ // END DETECT_BEGIN #define DETECT_END \ if (!cb(info, userp)) \ continue; \ LL_DELETE(devinfo_list, info); \ ++found; \ } \ return found; \ // END DETECT_END int _lowlevel_detect(lowl_found_devinfo_func_t cb, const char *serial, const char **product_needles, void * const userp) { DETECT_BEGIN if (serial && ((!info->serial) || strcmp(serial, info->serial))) continue; if (product_needles[0] && !_lowlevel_match_product(info, product_needles)) continue; DETECT_END } int lowlevel_detect_id(const lowl_found_devinfo_func_t cb, void * const userp, const struct lowlevel_driver * const lowl, const int32_t vid, const int32_t pid) { DETECT_BEGIN if (!lowlevel_match_id(info, lowl, vid, pid)) continue; DETECT_END } struct _device_claim { struct device_drv *drv; char *devpath; UT_hash_handle hh; }; struct device_drv *bfg_claim_any(struct device_drv * const api, const char *verbose, const char * const devpath) { static struct _device_claim *claims = NULL; struct _device_claim *c; HASH_FIND_STR(claims, devpath, c); if (c) { if (verbose && opt_debug) { char logbuf[LOGBUFSIZ]; logbuf[0] = '\0'; if (api) tailsprintf(logbuf, sizeof(logbuf), "%s device ", api->dname); if (verbose[0]) tailsprintf(logbuf, sizeof(logbuf), "%s (%s)", verbose, devpath); else tailsprintf(logbuf, sizeof(logbuf), "%s", devpath); tailsprintf(logbuf, sizeof(logbuf), " already claimed by "); if (api) tailsprintf(logbuf, sizeof(logbuf), "other "); tailsprintf(logbuf, sizeof(logbuf), "driver: %s", c->drv->dname); _applog(LOG_DEBUG, logbuf); } return c->drv; } if (!api) return NULL; c = malloc(sizeof(*c)); c->devpath = strdup(devpath); c->drv = api; HASH_ADD_KEYPTR(hh, claims, c->devpath, strlen(devpath), c); return NULL; } struct device_drv *bfg_claim_any2(struct device_drv * const api, const char * const verbose, const char * const llname, const char * const path) { const size_t llnamesz = strlen(llname); const size_t pathsz = strlen(path); char devpath[llnamesz + 1 + pathsz + 1]; memcpy(devpath, llname, llnamesz); devpath[llnamesz] = ':'; memcpy(&devpath[llnamesz+1], path, pathsz + 1); return bfg_claim_any(api, verbose, devpath); } bfgminer-bfgminer-3.10.0/lowlevel.h000066400000000000000000000055611226556647300172040ustar00rootroot00000000000000#ifndef _BFG_LOWLEVEL_H #define _BFG_LOWLEVEL_H #include #include #include #include struct lowlevel_device_info; typedef bool (*lowl_found_devinfo_func_t)(struct lowlevel_device_info *, void *); struct lowlevel_driver { const char *dname; struct lowlevel_device_info *(*devinfo_scan)(); void (*devinfo_free)(struct lowlevel_device_info *); }; struct lowlevel_device_info { char *manufacturer; char *product; char *serial; char *path; char *devid; uint16_t vid; uint16_t pid; struct lowlevel_driver *lowl; void *lowl_data; struct lowlevel_device_info *next; struct lowlevel_device_info *same_devid_next; UT_hash_handle hh; pthread_t probe_pth; int ref; }; extern char *bfg_make_devid_usb(uint8_t usbbus, uint8_t usbaddr); extern struct lowlevel_device_info *lowlevel_scan(); extern bool _lowlevel_match_product(const 
struct lowlevel_device_info *, const char **); #define lowlevel_match_product(info, ...) \ _lowlevel_match_product(info, (const char *[]){__VA_ARGS__, NULL}) #define lowlevel_match_lowlproduct(info, matchlowl, ...) \ (matchlowl == info->lowl && _lowlevel_match_product(info, (const char *[]){__VA_ARGS__, NULL})) extern bool lowlevel_match_id(const struct lowlevel_device_info *, const struct lowlevel_driver *, int32_t vid, int32_t pid); extern int _lowlevel_detect(lowl_found_devinfo_func_t, const char *serial, const char **product_needles, void *); #define lowlevel_detect(func, ...) _lowlevel_detect(func, NULL, (const char *[]){__VA_ARGS__, NULL}, NULL) #define lowlevel_detect_serial(func, serial) _lowlevel_detect(func, serial, (const char *[]){NULL}, NULL) extern int lowlevel_detect_id(lowl_found_devinfo_func_t, void *, const struct lowlevel_driver *, int32_t vid, int32_t pid); extern void lowlevel_scan_free(); extern struct lowlevel_device_info *lowlevel_ref(const struct lowlevel_device_info *); #define lowlevel_claim(drv, verbose, info) \ bfg_claim_any(drv, (verbose) ? ((info)->path ?: "") : NULL, (info)->devid) extern void lowlevel_devinfo_semicpy(struct lowlevel_device_info *dst, const struct lowlevel_device_info *src); extern void lowlevel_devinfo_free(struct lowlevel_device_info *); #ifdef USE_X6500 extern struct lowlevel_driver lowl_ft232r; #endif #ifdef NEED_BFG_LOWL_HID extern struct lowlevel_driver lowl_hid; #endif #ifdef USE_NANOFURY extern struct lowlevel_driver lowl_mcp2210; #endif #ifdef HAVE_LIBUSB extern struct lowlevel_driver lowl_usb; #else // Dummy definition for the various "don't warn if just a lower-level interface" checks static struct lowlevel_driver lowl_usb; #endif #ifdef NEED_BFG_LOWL_VCOM extern struct lowlevel_driver lowl_vcom; #endif extern struct device_drv *bfg_claim_any(struct device_drv *, const char *verbose, const char *devpath); extern struct device_drv *bfg_claim_any2(struct device_drv *, const char *verbose, const char *llname, const char *path); #endif bfgminer-bfgminer-3.10.0/m4/000077500000000000000000000000001226556647300155135ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/m4/00gnulib.m4000066400000000000000000000025221226556647300173760ustar00rootroot00000000000000# 00gnulib.m4 serial 2 dnl Copyright (C) 2009-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl This file must be named something that sorts before all other dnl gnulib-provided .m4 files. It is needed until such time as we can dnl assume Autoconf 2.64, with its improved AC_DEFUN_ONCE semantics. # AC_DEFUN_ONCE([NAME], VALUE) # ---------------------------- # Define NAME to expand to VALUE on the first use (whether by direct # expansion, or by AC_REQUIRE), and to nothing on all subsequent uses. # Avoid bugs in AC_REQUIRE in Autoconf 2.63 and earlier. This # definition is slower than the version in Autoconf 2.64, because it # can only use interfaces that existed since 2.59; but it achieves the # same effect. Quoting is necessary to avoid confusing Automake. m4_version_prereq([2.63.263], [], [m4_define([AC][_DEFUN_ONCE], [AC][_DEFUN([$1], [AC_REQUIRE([_gl_DEFUN_ONCE([$1])], [m4_indir([_gl_DEFUN_ONCE([$1])])])])]dnl [AC][_DEFUN([_gl_DEFUN_ONCE([$1])], [$2])])]) # gl_00GNULIB # ----------- # Witness macro that this file has been included. 
Needed to force # Automake to include this file prior to all other gnulib .m4 files. AC_DEFUN([gl_00GNULIB]) bfgminer-bfgminer-3.10.0/m4/extensions.m4000066400000000000000000000105271226556647300201610ustar00rootroot00000000000000# serial 9 -*- Autoconf -*- # Enable extensions on systems that normally disable them. # Copyright (C) 2003, 2006-2011 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This definition of AC_USE_SYSTEM_EXTENSIONS is stolen from CVS # Autoconf. Perhaps we can remove this once we can assume Autoconf # 2.62 or later everywhere, but since CVS Autoconf mutates rapidly # enough in this area it's likely we'll need to redefine # AC_USE_SYSTEM_EXTENSIONS for quite some time. # If autoconf reports a warning # warning: AC_COMPILE_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS # or warning: AC_RUN_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS # the fix is # 1) to ensure that AC_USE_SYSTEM_EXTENSIONS is never directly invoked # but always AC_REQUIREd, # 2) to ensure that for each occurrence of # AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) # or # AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) # the corresponding gnulib module description has 'extensions' among # its dependencies. This will ensure that the gl_USE_SYSTEM_EXTENSIONS # invocation occurs in gl_EARLY, not in gl_INIT. # AC_USE_SYSTEM_EXTENSIONS # ------------------------ # Enable extensions on systems that normally disable them, # typically due to standards-conformance issues. # Remember that #undef in AH_VERBATIM gets replaced with #define by # AC_DEFINE. The goal here is to define all known feature-enabling # macros, then, if reports of conflicts are made, disable macros that # cause problems on some platforms (such as __EXTENSIONS__). AC_DEFUN_ONCE([AC_USE_SYSTEM_EXTENSIONS], [AC_BEFORE([$0], [AC_COMPILE_IFELSE])dnl AC_BEFORE([$0], [AC_RUN_IFELSE])dnl AC_REQUIRE([AC_CANONICAL_HOST]) AC_CHECK_HEADER([minix/config.h], [MINIX=yes], [MINIX=]) if test "$MINIX" = yes; then AC_DEFINE([_POSIX_SOURCE], [1], [Define to 1 if you need to in order for `stat' and other things to work.]) AC_DEFINE([_POSIX_1_SOURCE], [2], [Define to 2 if the system does not provide POSIX.1 features except with this defined.]) AC_DEFINE([_MINIX], [1], [Define to 1 if on MINIX.]) fi dnl HP-UX 11.11 defines mbstate_t only if _XOPEN_SOURCE is defined to 500, dnl regardless of whether the flags -Ae or _D_HPUX_SOURCE=1 are already dnl provided. case "$host_os" in hpux*) AC_DEFINE([_XOPEN_SOURCE], [500], [Define to 500 only on HP-UX.]) ;; esac AH_VERBATIM([__EXTENSIONS__], [/* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # undef _ALL_SOURCE #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # undef _GNU_SOURCE #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # undef _POSIX_PTHREAD_SEMANTICS #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # undef _TANDEM_SOURCE #endif /* Enable general extensions on Solaris. 
*/ #ifndef __EXTENSIONS__ # undef __EXTENSIONS__ #endif ]) AC_CACHE_CHECK([whether it is safe to define __EXTENSIONS__], [ac_cv_safe_to_define___extensions__], [AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[ # define __EXTENSIONS__ 1 ]AC_INCLUDES_DEFAULT])], [ac_cv_safe_to_define___extensions__=yes], [ac_cv_safe_to_define___extensions__=no])]) test $ac_cv_safe_to_define___extensions__ = yes && AC_DEFINE([__EXTENSIONS__]) AC_DEFINE([_ALL_SOURCE]) AC_DEFINE([_GNU_SOURCE]) AC_DEFINE([_POSIX_PTHREAD_SEMANTICS]) AC_DEFINE([_TANDEM_SOURCE]) ])# AC_USE_SYSTEM_EXTENSIONS # gl_USE_SYSTEM_EXTENSIONS # ------------------------ # Enable extensions on systems that normally disable them, # typically due to standards-conformance issues. AC_DEFUN_ONCE([gl_USE_SYSTEM_EXTENSIONS], [ dnl Require this macro before AC_USE_SYSTEM_EXTENSIONS. dnl gnulib does not need it. But if it gets required by third-party macros dnl after AC_USE_SYSTEM_EXTENSIONS is required, autoconf 2.62..2.63 emit a dnl warning: "AC_COMPILE_IFELSE was called before AC_USE_SYSTEM_EXTENSIONS". dnl Note: We can do this only for one of the macros AC_AIX, AC_GNU_SOURCE, dnl AC_MINIX. If people still use AC_AIX or AC_MINIX, they are out of luck. AC_REQUIRE([AC_GNU_SOURCE]) AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) ]) bfgminer-bfgminer-3.10.0/m4/gnulib-cache.m4000066400000000000000000000025311226556647300202770ustar00rootroot00000000000000# Copyright (C) 2002-2011 Free Software Foundation, Inc. # # This file is free software, distributed under the terms of the GNU # General Public License. As a special exception to the GNU General # Public License, this file may be distributed as part of a program # that contains a configuration script generated by Autoconf, under # the same distribution terms as the rest of that program. # # Generated by gnulib-tool. # # This file represents the specification of how gnulib-tool is used. # It acts as a cache: It is written and read by gnulib-tool. # In projects that use version control, this file is meant to be put under # version control, like the configure.ac and various Makefile.am files. # Specification in the form of a command-line invocation: # gnulib-tool --import --dir=. --lib=libgnu --source-base=lib --m4-base=m4 --doc-base=doc --tests-base=tests --aux-dir=. --no-conditional-dependencies --no-libtool --macro-prefix=gl --no-vc-files memmem sigaction signal strtok_r # Specification in the form of a few gnulib-tool.m4 macro invocations: gl_LOCAL_DIR([]) gl_MODULES([ memmem sigaction signal strtok_r ]) gl_AVOID([]) gl_SOURCE_BASE([lib]) gl_M4_BASE([m4]) gl_PO_BASE([]) gl_DOC_BASE([doc]) gl_TESTS_BASE([tests]) gl_LIB([libgnu]) gl_MAKEFILE_NAME([]) gl_MACRO_PREFIX([gl]) gl_PO_DOMAIN([]) gl_WITNESS_C_DOMAIN([]) gl_VC_FILES([false]) bfgminer-bfgminer-3.10.0/m4/gnulib-common.m4000066400000000000000000000251571226556647300205350ustar00rootroot00000000000000# gnulib-common.m4 serial 26 dnl Copyright (C) 2007-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # gl_COMMON # is expanded unconditionally through gnulib-tool magic. AC_DEFUN([gl_COMMON], [ dnl Use AC_REQUIRE here, so that the code is expanded once only. 
AC_REQUIRE([gl_00GNULIB]) AC_REQUIRE([gl_COMMON_BODY]) ]) AC_DEFUN([gl_COMMON_BODY], [ AH_VERBATIM([isoc99_inline], [/* Work around a bug in Apple GCC 4.0.1 build 5465: In C99 mode, it supports the ISO C 99 semantics of 'extern inline' (unlike the GNU C semantics of earlier versions), but does not display it by setting __GNUC_STDC_INLINE__. __APPLE__ && __MACH__ test for MacOS X. __APPLE_CC__ tests for the Apple compiler and its version. __STDC_VERSION__ tests for the C99 mode. */ #if defined __APPLE__ && defined __MACH__ && __APPLE_CC__ >= 5465 && !defined __cplusplus && __STDC_VERSION__ >= 199901L && !defined __GNUC_STDC_INLINE__ # define __GNUC_STDC_INLINE__ 1 #endif]) AH_VERBATIM([unused_parameter], [/* Define as a marker that can be attached to declarations that might not be used. This helps to reduce warnings, such as from GCC -Wunused-parameter. */ #if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 7) # define _GL_UNUSED __attribute__ ((__unused__)) #else # define _GL_UNUSED #endif /* The name _UNUSED_PARAMETER_ is an earlier spelling, although the name is a misnomer outside of parameter lists. */ #define _UNUSED_PARAMETER_ _GL_UNUSED /* The __pure__ attribute was added in gcc 2.96. */ #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96) # define _GL_ATTRIBUTE_PURE __attribute__ ((__pure__)) #else # define _GL_ATTRIBUTE_PURE /* empty */ #endif /* The __const__ attribute was added in gcc 2.95. */ #if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 95) # define _GL_ATTRIBUTE_CONST __attribute__ ((__const__)) #else # define _GL_ATTRIBUTE_CONST /* empty */ #endif ]) dnl Preparation for running test programs: dnl Tell glibc to write diagnostics from -D_FORTIFY_SOURCE=2 to stderr, not dnl to /dev/tty, so they can be redirected to log files. Such diagnostics dnl arise e.g., in the macros gl_PRINTF_DIRECTIVE_N, gl_SNPRINTF_DIRECTIVE_N. LIBC_FATAL_STDERR_=1 export LIBC_FATAL_STDERR_ ]) # gl_MODULE_INDICATOR_CONDITION # expands to a C preprocessor expression that evaluates to 1 or 0, depending # whether a gnulib module that has been requested shall be considered present # or not. m4_define([gl_MODULE_INDICATOR_CONDITION], [1]) # gl_MODULE_INDICATOR_SET_VARIABLE([modulename]) # sets the shell variable that indicates the presence of the given module to # a C preprocessor expression that will evaluate to 1. AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE], [ gl_MODULE_INDICATOR_SET_VARIABLE_AUX( [GNULIB_[]m4_translit([[$1]], [abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___])], [gl_MODULE_INDICATOR_CONDITION]) ]) # gl_MODULE_INDICATOR_SET_VARIABLE_AUX([variable]) # modifies the shell variable to include the gl_MODULE_INDICATOR_CONDITION. # The shell variable's value is a C preprocessor expression that evaluates # to 0 or 1. AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE_AUX], [ m4_if(m4_defn([gl_MODULE_INDICATOR_CONDITION]), [1], [ dnl Simplify the expression VALUE || 1 to 1. $1=1 ], [gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR([$1], [gl_MODULE_INDICATOR_CONDITION])]) ]) # gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR([variable], [condition]) # modifies the shell variable to include the given condition. The shell # variable's value is a C preprocessor expression that evaluates to 0 or 1. AC_DEFUN([gl_MODULE_INDICATOR_SET_VARIABLE_AUX_OR], [ dnl Simplify the expression 1 || CONDITION to 1. if test "$[]$1" != 1; then dnl Simplify the expression 0 || CONDITION to CONDITION. 
if test "$[]$1" = 0; then $1=$2 else $1="($[]$1 || $2)" fi fi ]) # gl_MODULE_INDICATOR([modulename]) # defines a C macro indicating the presence of the given module # in a location where it can be used. # | Value | Value | # | in lib/ | in tests/ | # --------------------------------------------+---------+-----------+ # Module present among main modules: | 1 | 1 | # --------------------------------------------+---------+-----------+ # Module present among tests-related modules: | 0 | 1 | # --------------------------------------------+---------+-----------+ # Module not present at all: | 0 | 0 | # --------------------------------------------+---------+-----------+ AC_DEFUN([gl_MODULE_INDICATOR], [ AC_DEFINE_UNQUOTED([GNULIB_]m4_translit([[$1]], [abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___]), [gl_MODULE_INDICATOR_CONDITION], [Define to a C preprocessor expression that evaluates to 1 or 0, depending whether the gnulib module $1 shall be considered present.]) ]) # gl_MODULE_INDICATOR_FOR_TESTS([modulename]) # defines a C macro indicating the presence of the given module # in lib or tests. This is useful to determine whether the module # should be tested. # | Value | Value | # | in lib/ | in tests/ | # --------------------------------------------+---------+-----------+ # Module present among main modules: | 1 | 1 | # --------------------------------------------+---------+-----------+ # Module present among tests-related modules: | 1 | 1 | # --------------------------------------------+---------+-----------+ # Module not present at all: | 0 | 0 | # --------------------------------------------+---------+-----------+ AC_DEFUN([gl_MODULE_INDICATOR_FOR_TESTS], [ AC_DEFINE([GNULIB_TEST_]m4_translit([[$1]], [abcdefghijklmnopqrstuvwxyz./-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ___]), [1], [Define to 1 when the gnulib module $1 should be tested.]) ]) # gl_ASSERT_NO_GNULIB_POSIXCHECK # asserts that there will never be a need to #define GNULIB_POSIXCHECK. # and thereby enables an optimization of configure and config.h. # Used by Emacs. AC_DEFUN([gl_ASSERT_NO_GNULIB_POSIXCHECK], [ dnl Override gl_WARN_ON_USE_PREPARE. dnl But hide this definition from 'aclocal'. AC_DEFUN([gl_W][ARN_ON_USE_PREPARE], []) ]) # gl_ASSERT_NO_GNULIB_TESTS # asserts that there will be no gnulib tests in the scope of the configure.ac # and thereby enables an optimization of config.h. # Used by Emacs. AC_DEFUN([gl_ASSERT_NO_GNULIB_TESTS], [ dnl Override gl_MODULE_INDICATOR_FOR_TESTS. AC_DEFUN([gl_MODULE_INDICATOR_FOR_TESTS], []) ]) # Test whether exists. # Set HAVE_FEATURES_H. AC_DEFUN([gl_FEATURES_H], [ AC_CHECK_HEADERS_ONCE([features.h]) if test $ac_cv_header_features_h = yes; then HAVE_FEATURES_H=1 else HAVE_FEATURES_H=0 fi AC_SUBST([HAVE_FEATURES_H]) ]) # m4_foreach_w # is a backport of autoconf-2.59c's m4_foreach_w. # Remove this macro when we can assume autoconf >= 2.60. m4_ifndef([m4_foreach_w], [m4_define([m4_foreach_w], [m4_foreach([$1], m4_split(m4_normalize([$2]), [ ]), [$3])])]) # AS_VAR_IF(VAR, VALUE, [IF-MATCH], [IF-NOT-MATCH]) # ---------------------------------------------------- # Backport of autoconf-2.63b's macro. # Remove this macro when we can assume autoconf >= 2.64. m4_ifndef([AS_VAR_IF], [m4_define([AS_VAR_IF], [AS_IF([test x"AS_VAR_GET([$1])" = x""$2], [$3], [$4])])]) # AC_PROG_MKDIR_P # is a backport of autoconf-2.60's AC_PROG_MKDIR_P, with a fix # for interoperability with automake-1.9.6 from autoconf-2.62. 
# Remove this macro when we can assume autoconf >= 2.62 or # autoconf >= 2.60 && automake >= 1.10. m4_ifdef([AC_PROG_MKDIR_P], [ dnl For automake-1.9.6 && autoconf < 2.62: Ensure MKDIR_P is AC_SUBSTed. m4_define([AC_PROG_MKDIR_P], m4_defn([AC_PROG_MKDIR_P])[ AC_SUBST([MKDIR_P])])], [ dnl For autoconf < 2.60: Backport of AC_PROG_MKDIR_P. AC_DEFUN_ONCE([AC_PROG_MKDIR_P], [AC_REQUIRE([AM_PROG_MKDIR_P])dnl defined by automake MKDIR_P='$(mkdir_p)' AC_SUBST([MKDIR_P])])]) # AC_C_RESTRICT # This definition overrides the AC_C_RESTRICT macro from autoconf 2.60..2.61, # so that mixed use of GNU C and GNU C++ and mixed use of Sun C and Sun C++ # works. # This definition can be removed once autoconf >= 2.62 can be assumed. m4_if(m4_version_compare(m4_defn([m4_PACKAGE_VERSION]),[2.62]),[-1],[ AC_DEFUN([AC_C_RESTRICT], [AC_CACHE_CHECK([for C/C++ restrict keyword], [ac_cv_c_restrict], [ac_cv_c_restrict=no # The order here caters to the fact that C++ does not require restrict. for ac_kw in __restrict __restrict__ _Restrict restrict; do AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[typedef int * int_ptr; int foo (int_ptr $ac_kw ip) { return ip[0]; }]], [[int s[1]; int * $ac_kw t = s; t[0] = 0; return foo(t)]])], [ac_cv_c_restrict=$ac_kw]) test "$ac_cv_c_restrict" != no && break done ]) AH_VERBATIM([restrict], [/* Define to the equivalent of the C99 'restrict' keyword, or to nothing if this is not supported. Do not define if restrict is supported directly. */ #undef restrict /* Work around a bug in Sun C++: it does not support _Restrict, even though the corresponding Sun C compiler does, which causes "#define restrict _Restrict" in the previous line. Perhaps some future version of Sun C++ will work with _Restrict; if so, it'll probably define __RESTRICT, just as Sun C does. */ #if defined __SUNPRO_CC && !defined __RESTRICT # define _Restrict #endif]) case $ac_cv_c_restrict in restrict) ;; no) AC_DEFINE([restrict], []) ;; *) AC_DEFINE_UNQUOTED([restrict], [$ac_cv_c_restrict]) ;; esac ]) ]) # gl_BIGENDIAN # is like AC_C_BIGENDIAN, except that it can be AC_REQUIREd. # Note that AC_REQUIRE([AC_C_BIGENDIAN]) does not work reliably because some # macros invoke AC_C_BIGENDIAN with arguments. AC_DEFUN([gl_BIGENDIAN], [ AC_C_BIGENDIAN ]) # gl_CACHE_VAL_SILENT(cache-id, command-to-set-it) # is like AC_CACHE_VAL(cache-id, command-to-set-it), except that it does not # output a spurious "(cached)" mark in the midst of other configure output. # This macro should be used instead of AC_CACHE_VAL when it is not surrounded # by an AC_MSG_CHECKING/AC_MSG_RESULT pair. AC_DEFUN([gl_CACHE_VAL_SILENT], [ saved_as_echo_n="$as_echo_n" as_echo_n=':' AC_CACHE_VAL([$1], [$2]) as_echo_n="$saved_as_echo_n" ]) bfgminer-bfgminer-3.10.0/m4/gnulib-comp.m4000066400000000000000000000214361226556647300201770ustar00rootroot00000000000000# DO NOT EDIT! GENERATED AUTOMATICALLY! # Copyright (C) 2002-2011 Free Software Foundation, Inc. # # This file is free software, distributed under the terms of the GNU # General Public License. As a special exception to the GNU General # Public License, this file may be distributed as part of a program # that contains a configuration script generated by Autoconf, under # the same distribution terms as the rest of that program. # # Generated by gnulib-tool. # # This file represents the compiled summary of the specification in # gnulib-cache.m4. It lists the computed macro invocations that need # to be invoked from configure.ac. # In projects that use version control, this file can be treated like # other built files. 
# This macro should be invoked from ./configure.ac, in the section # "Checks for programs", right after AC_PROG_CC, and certainly before # any checks for libraries, header files, types and library functions. AC_DEFUN([gl_EARLY], [ m4_pattern_forbid([^gl_[A-Z]])dnl the gnulib macro namespace m4_pattern_allow([^gl_ES$])dnl a valid locale name m4_pattern_allow([^gl_LIBOBJS$])dnl a variable m4_pattern_allow([^gl_LTLIBOBJS$])dnl a variable AC_REQUIRE([AC_PROG_RANLIB]) # Code from module arg-nonnull: # Code from module c++defs: # Code from module extensions: AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) # Code from module include_next: # Code from module memchr: # Code from module memmem: # Code from module memmem-simple: # Code from module multiarch: # Code from module sigaction: # Code from module signal: # Code from module sigprocmask: # Code from module stddef: # Code from module stdint: # Code from module string: # Code from module strtok_r: # Code from module warn-on-use: ]) # This macro should be invoked from ./configure.ac, in the section # "Check for header files, types and library functions". AC_DEFUN([gl_INIT], [ AM_CONDITIONAL([GL_COND_LIBTOOL], [false]) gl_cond_libtool=false gl_libdeps= gl_ltlibdeps= gl_m4_base='m4' m4_pushdef([AC_LIBOBJ], m4_defn([gl_LIBOBJ])) m4_pushdef([AC_REPLACE_FUNCS], m4_defn([gl_REPLACE_FUNCS])) m4_pushdef([AC_LIBSOURCES], m4_defn([gl_LIBSOURCES])) m4_pushdef([gl_LIBSOURCES_LIST], []) m4_pushdef([gl_LIBSOURCES_DIR], []) gl_COMMON gl_source_base='lib' gl_FUNC_MEMCHR if test $HAVE_MEMCHR = 0 || test $REPLACE_MEMCHR = 1; then AC_LIBOBJ([memchr]) gl_PREREQ_MEMCHR fi gl_STRING_MODULE_INDICATOR([memchr]) gl_FUNC_MEMMEM if test $HAVE_MEMMEM = 0 || test $REPLACE_MEMMEM = 1; then AC_LIBOBJ([memmem]) fi gl_FUNC_MEMMEM_SIMPLE if test $HAVE_MEMMEM = 0 || test $REPLACE_MEMMEM = 1; then AC_LIBOBJ([memmem]) fi gl_STRING_MODULE_INDICATOR([memmem]) gl_MULTIARCH gl_SIGACTION if test $HAVE_SIGACTION = 0; then AC_LIBOBJ([sigaction]) gl_PREREQ_SIGACTION fi gl_SIGNAL_MODULE_INDICATOR([sigaction]) gl_SIGNAL_H gl_SIGNALBLOCKING if test $HAVE_POSIX_SIGNALBLOCKING = 0; then AC_LIBOBJ([sigprocmask]) gl_PREREQ_SIGPROCMASK fi gl_SIGNAL_MODULE_INDICATOR([sigprocmask]) gl_STDDEF_H gl_STDINT_H gl_HEADER_STRING_H gl_FUNC_STRTOK_R gl_STRING_MODULE_INDICATOR([strtok_r]) # End of code from modules m4_ifval(gl_LIBSOURCES_LIST, [ m4_syscmd([test ! -d ]m4_defn([gl_LIBSOURCES_DIR])[ || for gl_file in ]gl_LIBSOURCES_LIST[ ; do if test ! -r ]m4_defn([gl_LIBSOURCES_DIR])[/$gl_file ; then echo "missing file ]m4_defn([gl_LIBSOURCES_DIR])[/$gl_file" >&2 exit 1 fi done])dnl m4_if(m4_sysval, [0], [], [AC_FATAL([expected source file, required through AC_LIBSOURCES, not found])]) ]) m4_popdef([gl_LIBSOURCES_DIR]) m4_popdef([gl_LIBSOURCES_LIST]) m4_popdef([AC_LIBSOURCES]) m4_popdef([AC_REPLACE_FUNCS]) m4_popdef([AC_LIBOBJ]) AC_CONFIG_COMMANDS_PRE([ gl_libobjs= gl_ltlibobjs= if test -n "$gl_LIBOBJS"; then # Remove the extension. 
sed_drop_objext='s/\.o$//;s/\.obj$//' for i in `for i in $gl_LIBOBJS; do echo "$i"; done | sed -e "$sed_drop_objext" | sort | uniq`; do gl_libobjs="$gl_libobjs $i.$ac_objext" gl_ltlibobjs="$gl_ltlibobjs $i.lo" done fi AC_SUBST([gl_LIBOBJS], [$gl_libobjs]) AC_SUBST([gl_LTLIBOBJS], [$gl_ltlibobjs]) ]) gltests_libdeps= gltests_ltlibdeps= m4_pushdef([AC_LIBOBJ], m4_defn([gltests_LIBOBJ])) m4_pushdef([AC_REPLACE_FUNCS], m4_defn([gltests_REPLACE_FUNCS])) m4_pushdef([AC_LIBSOURCES], m4_defn([gltests_LIBSOURCES])) m4_pushdef([gltests_LIBSOURCES_LIST], []) m4_pushdef([gltests_LIBSOURCES_DIR], []) gl_COMMON gl_source_base='tests' changequote(,)dnl gltests_WITNESS=IN_`echo "${PACKAGE-$PACKAGE_TARNAME}" | LC_ALL=C tr abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ | LC_ALL=C sed -e 's/[^A-Z0-9_]/_/g'`_GNULIB_TESTS changequote([, ])dnl AC_SUBST([gltests_WITNESS]) gl_module_indicator_condition=$gltests_WITNESS m4_pushdef([gl_MODULE_INDICATOR_CONDITION], [$gl_module_indicator_condition]) m4_popdef([gl_MODULE_INDICATOR_CONDITION]) m4_ifval(gltests_LIBSOURCES_LIST, [ m4_syscmd([test ! -d ]m4_defn([gltests_LIBSOURCES_DIR])[ || for gl_file in ]gltests_LIBSOURCES_LIST[ ; do if test ! -r ]m4_defn([gltests_LIBSOURCES_DIR])[/$gl_file ; then echo "missing file ]m4_defn([gltests_LIBSOURCES_DIR])[/$gl_file" >&2 exit 1 fi done])dnl m4_if(m4_sysval, [0], [], [AC_FATAL([expected source file, required through AC_LIBSOURCES, not found])]) ]) m4_popdef([gltests_LIBSOURCES_DIR]) m4_popdef([gltests_LIBSOURCES_LIST]) m4_popdef([AC_LIBSOURCES]) m4_popdef([AC_REPLACE_FUNCS]) m4_popdef([AC_LIBOBJ]) AC_CONFIG_COMMANDS_PRE([ gltests_libobjs= gltests_ltlibobjs= if test -n "$gltests_LIBOBJS"; then # Remove the extension. sed_drop_objext='s/\.o$//;s/\.obj$//' for i in `for i in $gltests_LIBOBJS; do echo "$i"; done | sed -e "$sed_drop_objext" | sort | uniq`; do gltests_libobjs="$gltests_libobjs $i.$ac_objext" gltests_ltlibobjs="$gltests_ltlibobjs $i.lo" done fi AC_SUBST([gltests_LIBOBJS], [$gltests_libobjs]) AC_SUBST([gltests_LTLIBOBJS], [$gltests_ltlibobjs]) ]) LIBGNU_LIBDEPS="$gl_libdeps" AC_SUBST([LIBGNU_LIBDEPS]) LIBGNU_LTLIBDEPS="$gl_ltlibdeps" AC_SUBST([LIBGNU_LTLIBDEPS]) ]) # Like AC_LIBOBJ, except that the module name goes # into gl_LIBOBJS instead of into LIBOBJS. AC_DEFUN([gl_LIBOBJ], [ AS_LITERAL_IF([$1], [gl_LIBSOURCES([$1.c])])dnl gl_LIBOBJS="$gl_LIBOBJS $1.$ac_objext" ]) # Like AC_REPLACE_FUNCS, except that the module name goes # into gl_LIBOBJS instead of into LIBOBJS. AC_DEFUN([gl_REPLACE_FUNCS], [ m4_foreach_w([gl_NAME], [$1], [AC_LIBSOURCES(gl_NAME[.c])])dnl AC_CHECK_FUNCS([$1], , [gl_LIBOBJ($ac_func)]) ]) # Like AC_LIBSOURCES, except the directory where the source file is # expected is derived from the gnulib-tool parameterization, # and alloca is special cased (for the alloca-opt module). # We could also entirely rely on EXTRA_lib..._SOURCES. AC_DEFUN([gl_LIBSOURCES], [ m4_foreach([_gl_NAME], [$1], [ m4_if(_gl_NAME, [alloca.c], [], [ m4_define([gl_LIBSOURCES_DIR], [lib]) m4_append([gl_LIBSOURCES_LIST], _gl_NAME, [ ]) ]) ]) ]) # Like AC_LIBOBJ, except that the module name goes # into gltests_LIBOBJS instead of into LIBOBJS. AC_DEFUN([gltests_LIBOBJ], [ AS_LITERAL_IF([$1], [gltests_LIBSOURCES([$1.c])])dnl gltests_LIBOBJS="$gltests_LIBOBJS $1.$ac_objext" ]) # Like AC_REPLACE_FUNCS, except that the module name goes # into gltests_LIBOBJS instead of into LIBOBJS. 
AC_DEFUN([gltests_REPLACE_FUNCS], [ m4_foreach_w([gl_NAME], [$1], [AC_LIBSOURCES(gl_NAME[.c])])dnl AC_CHECK_FUNCS([$1], , [gltests_LIBOBJ($ac_func)]) ]) # Like AC_LIBSOURCES, except the directory where the source file is # expected is derived from the gnulib-tool parameterization, # and alloca is special cased (for the alloca-opt module). # We could also entirely rely on EXTRA_lib..._SOURCES. AC_DEFUN([gltests_LIBSOURCES], [ m4_foreach([_gl_NAME], [$1], [ m4_if(_gl_NAME, [alloca.c], [], [ m4_define([gltests_LIBSOURCES_DIR], [tests]) m4_append([gltests_LIBSOURCES_LIST], _gl_NAME, [ ]) ]) ]) ]) # This macro records the list of files which have been installed by # gnulib-tool and may be removed by future gnulib-tool invocations. AC_DEFUN([gl_FILE_LIST], [ build-aux/arg-nonnull.h build-aux/c++defs.h build-aux/warn-on-use.h lib/dummy.c lib/memchr.c lib/memchr.valgrind lib/memmem.c lib/sig-handler.h lib/sigaction.c lib/signal.in.h lib/sigprocmask.c lib/stddef.in.h lib/stdint.in.h lib/str-two-way.h lib/string.in.h lib/strtok_r.c m4/00gnulib.m4 m4/extensions.m4 m4/gnulib-common.m4 m4/include_next.m4 m4/longlong.m4 m4/memchr.m4 m4/memmem.m4 m4/mmap-anon.m4 m4/multiarch.m4 m4/onceonly.m4 m4/sigaction.m4 m4/signal_h.m4 m4/signalblocking.m4 m4/stddef_h.m4 m4/stdint.m4 m4/string_h.m4 m4/strtok_r.m4 m4/warn-on-use.m4 m4/wchar_t.m4 ]) bfgminer-bfgminer-3.10.0/m4/gnulib-tool.m4000066400000000000000000000026471226556647300202210ustar00rootroot00000000000000# gnulib-tool.m4 serial 2 dnl Copyright (C) 2004-2005, 2009-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl The following macros need not be invoked explicitly. dnl Invoking them does nothing except to declare default arguments dnl for "gnulib-tool --import". dnl Usage: gl_LOCAL_DIR([DIR]) AC_DEFUN([gl_LOCAL_DIR], []) dnl Usage: gl_MODULES([module1 module2 ...]) AC_DEFUN([gl_MODULES], []) dnl Usage: gl_AVOID([module1 module2 ...]) AC_DEFUN([gl_AVOID], []) dnl Usage: gl_SOURCE_BASE([DIR]) AC_DEFUN([gl_SOURCE_BASE], []) dnl Usage: gl_M4_BASE([DIR]) AC_DEFUN([gl_M4_BASE], []) dnl Usage: gl_PO_BASE([DIR]) AC_DEFUN([gl_PO_BASE], []) dnl Usage: gl_DOC_BASE([DIR]) AC_DEFUN([gl_DOC_BASE], []) dnl Usage: gl_TESTS_BASE([DIR]) AC_DEFUN([gl_TESTS_BASE], []) dnl Usage: gl_WITH_TESTS AC_DEFUN([gl_WITH_TESTS], []) dnl Usage: gl_LIB([LIBNAME]) AC_DEFUN([gl_LIB], []) dnl Usage: gl_LGPL or gl_LGPL([VERSION]) AC_DEFUN([gl_LGPL], []) dnl Usage: gl_MAKEFILE_NAME([FILENAME]) AC_DEFUN([gl_MAKEFILE_NAME], []) dnl Usage: gl_LIBTOOL AC_DEFUN([gl_LIBTOOL], []) dnl Usage: gl_MACRO_PREFIX([PREFIX]) AC_DEFUN([gl_MACRO_PREFIX], []) dnl Usage: gl_PO_DOMAIN([DOMAIN]) AC_DEFUN([gl_PO_DOMAIN], []) dnl Usage: gl_VC_FILES([BOOLEAN]) AC_DEFUN([gl_VC_FILES], []) bfgminer-bfgminer-3.10.0/m4/include_next.m4000066400000000000000000000231241226556647300204400ustar00rootroot00000000000000# include_next.m4 serial 18 dnl Copyright (C) 2006-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Paul Eggert and Derek Price. dnl Sets INCLUDE_NEXT and PRAGMA_SYSTEM_HEADER. dnl dnl INCLUDE_NEXT expands to 'include_next' if the compiler supports it, or to dnl 'include' otherwise. 
dnl dnl INCLUDE_NEXT_AS_FIRST_DIRECTIVE expands to 'include_next' if the compiler dnl supports it in the special case that it is the first include directive in dnl the given file, or to 'include' otherwise. dnl dnl PRAGMA_SYSTEM_HEADER can be used in files that contain #include_next, dnl so as to avoid GCC warnings when the gcc option -pedantic is used. dnl '#pragma GCC system_header' has the same effect as if the file was found dnl through the include search path specified with '-isystem' options (as dnl opposed to the search path specified with '-I' options). Namely, gcc dnl does not warn about some things, and on some systems (Solaris and Interix) dnl __STDC__ evaluates to 0 instead of to 1. The latter is an undesired side dnl effect; we are therefore careful to use 'defined __STDC__' or '1' instead dnl of plain '__STDC__'. dnl dnl PRAGMA_COLUMNS can be used in files that override system header files, so dnl as to avoid compilation errors on HP NonStop systems when the gnulib file dnl is included by a system header file that does a "#pragma COLUMNS 80" (which dnl has the effect of truncating the lines of that file and all files that it dnl includes to 80 columns) and the gnulib file has lines longer than 80 dnl columns. AC_DEFUN([gl_INCLUDE_NEXT], [ AC_LANG_PREPROC_REQUIRE() AC_CACHE_CHECK([whether the preprocessor supports include_next], [gl_cv_have_include_next], [rm -rf conftestd1a conftestd1b conftestd2 mkdir conftestd1a conftestd1b conftestd2 dnl IBM C 9.0, 10.1 (original versions, prior to the 2009-01 updates) on dnl AIX 6.1 support include_next when used as first preprocessor directive dnl in a file, but not when preceded by another include directive. Check dnl for this bug by including . dnl Additionally, with this same compiler, include_next is a no-op when dnl used in a header file that was included by specifying its absolute dnl file name. Despite these two bugs, include_next is used in the dnl compiler's . By virtue of the second bug, we need to use dnl include_next as well in this case. cat < conftestd1a/conftest.h #define DEFINED_IN_CONFTESTD1 #include_next #ifdef DEFINED_IN_CONFTESTD2 int foo; #else #error "include_next doesn't work" #endif EOF cat < conftestd1b/conftest.h #define DEFINED_IN_CONFTESTD1 #include #include_next #ifdef DEFINED_IN_CONFTESTD2 int foo; #else #error "include_next doesn't work" #endif EOF cat < conftestd2/conftest.h #ifndef DEFINED_IN_CONFTESTD1 #error "include_next test doesn't work" #endif #define DEFINED_IN_CONFTESTD2 EOF gl_save_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$gl_save_CPPFLAGS -Iconftestd1b -Iconftestd2" dnl We intentionally avoid using AC_LANG_SOURCE here. 
AC_COMPILE_IFELSE([AC_LANG_DEFINES_PROVIDED[#include ]], [gl_cv_have_include_next=yes], [CPPFLAGS="$gl_save_CPPFLAGS -Iconftestd1a -Iconftestd2" AC_COMPILE_IFELSE([AC_LANG_DEFINES_PROVIDED[#include ]], [gl_cv_have_include_next=buggy], [gl_cv_have_include_next=no]) ]) CPPFLAGS="$gl_save_CPPFLAGS" rm -rf conftestd1a conftestd1b conftestd2 ]) PRAGMA_SYSTEM_HEADER= if test $gl_cv_have_include_next = yes; then INCLUDE_NEXT=include_next INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include_next if test -n "$GCC"; then PRAGMA_SYSTEM_HEADER='#pragma GCC system_header' fi else if test $gl_cv_have_include_next = buggy; then INCLUDE_NEXT=include INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include_next else INCLUDE_NEXT=include INCLUDE_NEXT_AS_FIRST_DIRECTIVE=include fi fi AC_SUBST([INCLUDE_NEXT]) AC_SUBST([INCLUDE_NEXT_AS_FIRST_DIRECTIVE]) AC_SUBST([PRAGMA_SYSTEM_HEADER]) AC_CACHE_CHECK([whether system header files limit the line length], [gl_cv_pragma_columns], [dnl HP NonStop systems, which define __TANDEM, have this misfeature. AC_EGREP_CPP([choke me], [ #ifdef __TANDEM choke me #endif ], [gl_cv_pragma_columns=yes], [gl_cv_pragma_columns=no]) ]) if test $gl_cv_pragma_columns = yes; then PRAGMA_COLUMNS="#pragma COLUMNS 10000" else PRAGMA_COLUMNS= fi AC_SUBST([PRAGMA_COLUMNS]) ]) # gl_CHECK_NEXT_HEADERS(HEADER1 HEADER2 ...) # ------------------------------------------ # For each arg foo.h, if #include_next works, define NEXT_FOO_H to be # ''; otherwise define it to be # '"///usr/include/foo.h"', or whatever other absolute file name is suitable. # Also, if #include_next works as first preprocessing directive in a file, # define NEXT_AS_FIRST_DIRECTIVE_FOO_H to be ''; otherwise define it to # be # '"///usr/include/foo.h"', or whatever other absolute file name is suitable. # That way, a header file with the following line: # #@INCLUDE_NEXT@ @NEXT_FOO_H@ # or # #@INCLUDE_NEXT_AS_FIRST_DIRECTIVE@ @NEXT_AS_FIRST_DIRECTIVE_FOO_H@ # behaves (after sed substitution) as if it contained # #include_next # even if the compiler does not support include_next. # The three "///" are to pacify Sun C 5.8, which otherwise would say # "warning: #include of /usr/include/... may be non-portable". # Use `""', not `<>', so that the /// cannot be confused with a C99 comment. # Note: This macro assumes that the header file is not empty after # preprocessing, i.e. it does not only define preprocessor macros but also # provides some type/enum definitions or function/variable declarations. # # This macro also checks whether each header exists, by invoking # AC_CHECK_HEADERS_ONCE or AC_CHECK_HEADERS on each argument. AC_DEFUN([gl_CHECK_NEXT_HEADERS], [ gl_NEXT_HEADERS_INTERNAL([$1], [check]) ]) # gl_NEXT_HEADERS(HEADER1 HEADER2 ...) # ------------------------------------ # Like gl_CHECK_NEXT_HEADERS, except do not check whether the headers exist. # This is suitable for headers like that are standardized by C89 # and therefore can be assumed to exist. AC_DEFUN([gl_NEXT_HEADERS], [ gl_NEXT_HEADERS_INTERNAL([$1], [assume]) ]) # The guts of gl_CHECK_NEXT_HEADERS and gl_NEXT_HEADERS. 
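# Illustrative sketch (added commentary, not part of upstream gnulib): for a
# module that invokes
#   gl_CHECK_NEXT_HEADERS([string.h])
# configure ends up substituting roughly
#   NEXT_STRING_H='<string.h>'                     (when include_next works)
#   NEXT_STRING_H='"///usr/include/string.h"'      (otherwise; path may vary)
# so that a template such as lib/string.in.h containing
#   #@INCLUDE_NEXT@ @NEXT_STRING_H@
# expands, after sed substitution, to either
#   #include_next <string.h>
# or
#   #include "///usr/include/string.h"
# exactly as described above for the generic foo.h case.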
AC_DEFUN([gl_NEXT_HEADERS_INTERNAL], [ AC_REQUIRE([gl_INCLUDE_NEXT]) AC_REQUIRE([AC_CANONICAL_HOST]) m4_if([$2], [check], [AC_CHECK_HEADERS_ONCE([$1]) ]) m4_foreach_w([gl_HEADER_NAME], [$1], [AS_VAR_PUSHDEF([gl_next_header], [gl_cv_next_]m4_defn([gl_HEADER_NAME])) if test $gl_cv_have_include_next = yes; then AS_VAR_SET([gl_next_header], ['<'gl_HEADER_NAME'>']) else AC_CACHE_CHECK( [absolute name of <]m4_defn([gl_HEADER_NAME])[>], m4_defn([gl_next_header]), [m4_if([$2], [check], [AS_VAR_PUSHDEF([gl_header_exists], [ac_cv_header_]m4_defn([gl_HEADER_NAME])) if test AS_VAR_GET(gl_header_exists) = yes; then AS_VAR_POPDEF([gl_header_exists]) ]) AC_LANG_CONFTEST( [AC_LANG_SOURCE( [[#include <]]m4_dquote(m4_defn([gl_HEADER_NAME]))[[>]] )]) dnl AIX "xlc -E" and "cc -E" omit #line directives for header dnl files that contain only a #include of other header files and dnl no non-comment tokens of their own. This leads to a failure dnl to detect the absolute name of , , dnl and others. The workaround is to force preservation dnl of comments through option -C. This ensures all necessary dnl #line directives are present. GCC supports option -C as well. case "$host_os" in aix*) gl_absname_cpp="$ac_cpp -C" ;; *) gl_absname_cpp="$ac_cpp" ;; esac dnl eval is necessary to expand gl_absname_cpp. dnl Ultrix and Pyramid sh refuse to redirect output of eval, dnl so use subshell. AS_VAR_SET([gl_next_header], ['"'`(eval "$gl_absname_cpp conftest.$ac_ext") 2>&AS_MESSAGE_LOG_FD | sed -n '\#/]m4_defn([gl_HEADER_NAME])[#{ s#.*"\(.*/]m4_defn([gl_HEADER_NAME])[\)".*#\1# s#^/[^/]#//&# p q }'`'"']) m4_if([$2], [check], [else AS_VAR_SET([gl_next_header], ['<'gl_HEADER_NAME'>']) fi ]) ]) fi AC_SUBST( AS_TR_CPP([NEXT_]m4_defn([gl_HEADER_NAME])), [AS_VAR_GET([gl_next_header])]) if test $gl_cv_have_include_next = yes || test $gl_cv_have_include_next = buggy; then # INCLUDE_NEXT_AS_FIRST_DIRECTIVE='include_next' gl_next_as_first_directive='<'gl_HEADER_NAME'>' else # INCLUDE_NEXT_AS_FIRST_DIRECTIVE='include' gl_next_as_first_directive=AS_VAR_GET([gl_next_header]) fi AC_SUBST( AS_TR_CPP([NEXT_AS_FIRST_DIRECTIVE_]m4_defn([gl_HEADER_NAME])), [$gl_next_as_first_directive]) AS_VAR_POPDEF([gl_next_header])]) ]) # Autoconf 2.68 added warnings for our use of AC_COMPILE_IFELSE; # this fallback is safe for all earlier autoconf versions. m4_define_default([AC_LANG_DEFINES_PROVIDED]) bfgminer-bfgminer-3.10.0/m4/longlong.m4000066400000000000000000000112031226556647300175710ustar00rootroot00000000000000# longlong.m4 serial 16 dnl Copyright (C) 1999-2007, 2009-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Paul Eggert. # Define HAVE_LONG_LONG_INT if 'long long int' works. # This fixes a bug in Autoconf 2.61, and can be faster # than what's in Autoconf 2.62 through 2.68. # Note: If the type 'long long int' exists but is only 32 bits large # (as on some very old compilers), HAVE_LONG_LONG_INT will not be # defined. In this case you can treat 'long long int' like 'long int'. 
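# Illustrative usage sketch (added commentary, not part of upstream gnulib):
# after AC_TYPE_LONG_LONG_INT has run and config.h is included, C code can
# follow the fallback suggested in the note above, e.g.
#   #ifdef HAVE_LONG_LONG_INT
#   typedef long long int widest_int;   /* name chosen only for this example */
#   #else
#   typedef long int widest_int;
#   #endif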
AC_DEFUN([AC_TYPE_LONG_LONG_INT], [ AC_REQUIRE([AC_TYPE_UNSIGNED_LONG_LONG_INT]) AC_CACHE_CHECK([for long long int], [ac_cv_type_long_long_int], [ac_cv_type_long_long_int=yes if test "x${ac_cv_prog_cc_c99-no}" = xno; then ac_cv_type_long_long_int=$ac_cv_type_unsigned_long_long_int if test $ac_cv_type_long_long_int = yes; then dnl Catch a bug in Tandem NonStop Kernel (OSS) cc -O circa 2004. dnl If cross compiling, assume the bug is not important, since dnl nobody cross compiles for this platform as far as we know. AC_RUN_IFELSE( [AC_LANG_PROGRAM( [[@%:@include @%:@ifndef LLONG_MAX @%:@ define HALF \ (1LL << (sizeof (long long int) * CHAR_BIT - 2)) @%:@ define LLONG_MAX (HALF - 1 + HALF) @%:@endif]], [[long long int n = 1; int i; for (i = 0; ; i++) { long long int m = n << i; if (m >> i != n) return 1; if (LLONG_MAX / 2 < m) break; } return 0;]])], [], [ac_cv_type_long_long_int=no], [:]) fi fi]) if test $ac_cv_type_long_long_int = yes; then AC_DEFINE([HAVE_LONG_LONG_INT], [1], [Define to 1 if the system has the type `long long int'.]) fi ]) # Define HAVE_UNSIGNED_LONG_LONG_INT if 'unsigned long long int' works. # This fixes a bug in Autoconf 2.61, and can be faster # than what's in Autoconf 2.62 through 2.68. # Note: If the type 'unsigned long long int' exists but is only 32 bits # large (as on some very old compilers), AC_TYPE_UNSIGNED_LONG_LONG_INT # will not be defined. In this case you can treat 'unsigned long long int' # like 'unsigned long int'. AC_DEFUN([AC_TYPE_UNSIGNED_LONG_LONG_INT], [ AC_CACHE_CHECK([for unsigned long long int], [ac_cv_type_unsigned_long_long_int], [ac_cv_type_unsigned_long_long_int=yes if test "x${ac_cv_prog_cc_c99-no}" = xno; then AC_LINK_IFELSE( [_AC_TYPE_LONG_LONG_SNIPPET], [], [ac_cv_type_unsigned_long_long_int=no]) fi]) if test $ac_cv_type_unsigned_long_long_int = yes; then AC_DEFINE([HAVE_UNSIGNED_LONG_LONG_INT], [1], [Define to 1 if the system has the type `unsigned long long int'.]) fi ]) # Expands to a C program that can be used to test for simultaneous support # of 'long long' and 'unsigned long long'. We don't want to say that # 'long long' is available if 'unsigned long long' is not, or vice versa, # because too many programs rely on the symmetry between signed and unsigned # integer types (excluding 'bool'). AC_DEFUN([_AC_TYPE_LONG_LONG_SNIPPET], [ AC_LANG_PROGRAM( [[/* For now, do not test the preprocessor; as of 2007 there are too many implementations with broken preprocessors. Perhaps this can be revisited in 2012. In the meantime, code should not expect #if to work with literals wider than 32 bits. */ /* Test literals. */ long long int ll = 9223372036854775807ll; long long int nll = -9223372036854775807LL; unsigned long long int ull = 18446744073709551615ULL; /* Test constant expressions. */ typedef int a[((-9223372036854775807LL < 0 && 0 < 9223372036854775807ll) ? 1 : -1)]; typedef int b[(18446744073709551615ULL <= (unsigned long long int) -1 ? 1 : -1)]; int i = 63;]], [[/* Test availability of runtime routines for shift and division. */ long long int llmax = 9223372036854775807ll; unsigned long long int ullmax = 18446744073709551615ull; return ((ll << 63) | (ll >> 63) | (ll < i) | (ll > i) | (llmax / ll) | (llmax % ll) | (ull << 63) | (ull >> 63) | (ull << i) | (ull >> i) | (ullmax / ull) | (ullmax % ull));]]) ]) bfgminer-bfgminer-3.10.0/m4/memchr.m4000066400000000000000000000053401226556647300172320ustar00rootroot00000000000000# memchr.m4 serial 12 dnl Copyright (C) 2002-2004, 2009-2011 Free Software Foundation, Inc. 
dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. AC_DEFUN_ONCE([gl_FUNC_MEMCHR], [ dnl Check for prerequisites for memory fence checks. gl_FUNC_MMAP_ANON AC_CHECK_HEADERS_ONCE([sys/mman.h]) AC_CHECK_FUNCS_ONCE([mprotect]) AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) m4_ifdef([gl_FUNC_MEMCHR_OBSOLETE], [ dnl These days, we assume memchr is present. But if support for old dnl platforms is desired: AC_CHECK_FUNCS_ONCE([memchr]) if test $ac_cv_func_memchr = no; then HAVE_MEMCHR=0 fi ]) if test $HAVE_MEMCHR = 1; then # Detect platform-specific bugs in some versions of glibc: # memchr should not dereference anything with length 0 # http://bugzilla.redhat.com/499689 # memchr should not dereference overestimated length after a match # http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=521737 # http://sourceware.org/bugzilla/show_bug.cgi?id=10162 # Assume that memchr works on platforms that lack mprotect. AC_CACHE_CHECK([whether memchr works], [gl_cv_func_memchr_works], [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ #include #if HAVE_SYS_MMAN_H # include # include # include # include # ifndef MAP_FILE # define MAP_FILE 0 # endif #endif ]], [[ int result = 0; char *fence = NULL; #if HAVE_SYS_MMAN_H && HAVE_MPROTECT # if HAVE_MAP_ANONYMOUS const int flags = MAP_ANONYMOUS | MAP_PRIVATE; const int fd = -1; # else /* !HAVE_MAP_ANONYMOUS */ const int flags = MAP_FILE | MAP_PRIVATE; int fd = open ("/dev/zero", O_RDONLY, 0666); if (fd >= 0) # endif { int pagesize = getpagesize (); char *two_pages = (char *) mmap (NULL, 2 * pagesize, PROT_READ | PROT_WRITE, flags, fd, 0); if (two_pages != (char *)(-1) && mprotect (two_pages + pagesize, pagesize, PROT_NONE) == 0) fence = two_pages + pagesize; } #endif if (fence) { if (memchr (fence, 0, 0)) result |= 1; strcpy (fence - 9, "12345678"); if (memchr (fence - 9, 0, 79) != fence - 1) result |= 2; if (memchr (fence - 1, 0, 3) != fence - 1) result |= 4; } return result; ]])], [gl_cv_func_memchr_works=yes], [gl_cv_func_memchr_works=no], [dnl Be pessimistic for now. gl_cv_func_memchr_works="guessing no"])]) if test "$gl_cv_func_memchr_works" != yes; then REPLACE_MEMCHR=1 fi fi ]) # Prerequisites of lib/memchr.c. AC_DEFUN([gl_PREREQ_MEMCHR], [ AC_CHECK_HEADERS([bp-sym.h]) ]) bfgminer-bfgminer-3.10.0/m4/memmem.m4000066400000000000000000000110741226556647300172350ustar00rootroot00000000000000# memmem.m4 serial 23 dnl Copyright (C) 2002-2004, 2007-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl Check that memmem is present and functional. AC_DEFUN([gl_FUNC_MEMMEM_SIMPLE], [ dnl Persuade glibc to declare memmem(). AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) AC_CHECK_FUNCS([memmem]) if test $ac_cv_func_memmem = yes; then HAVE_MEMMEM=1 else HAVE_MEMMEM=0 fi AC_CHECK_DECLS_ONCE([memmem]) if test $ac_cv_have_decl_memmem = no; then HAVE_DECL_MEMMEM=0 else dnl Detect http://sourceware.org/bugzilla/show_bug.cgi?id=12092. dnl Also check that we handle empty needles correctly. 
AC_CACHE_CHECK([whether memmem works], [gl_cv_func_memmem_works_always], [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ #include /* for memmem */ #define P "_EF_BF_BD" #define HAYSTACK "F_BD_CE_BD" P P P P "_C3_88_20" P P P "_C3_A7_20" P #define NEEDLE P P P P P ]], [[ int result = 0; if (memmem (HAYSTACK, strlen (HAYSTACK), NEEDLE, strlen (NEEDLE))) result |= 1; /* Check for empty needle behavior. */ { const char *haystack = "AAA"; if (memmem (haystack, 3, NULL, 0) != haystack) result |= 2; } return result; ]])], [gl_cv_func_memmem_works_always=yes], [gl_cv_func_memmem_works_always=no], [dnl glibc 2.9..2.12 and cygwin 1.7.7 have issue #12092 above. dnl Also empty needles work on glibc >= 2.1 and cygwin >= 1.7.0. dnl uClibc is not affected, since it uses different source code. dnl Assume that it works on all other platforms (even if not linear). AC_EGREP_CPP([Lucky user], [ #ifdef __GNU_LIBRARY__ #include #if ((__GLIBC__ == 2 && ((__GLIBC_MINOR > 0 && __GLIBC_MINOR__ < 9) \ || __GLIBC_MINOR__ > 12)) \ || (__GLIBC__ > 2)) \ || defined __UCLIBC__ Lucky user #endif #elif defined __CYGWIN__ #include #if CYGWIN_VERSION_DLL_COMBINED > CYGWIN_VERSION_DLL_MAKE_COMBINED (1007, 7) Lucky user #endif #else Lucky user #endif ], [gl_cv_func_memmem_works_always=yes], [gl_cv_func_memmem_works_always="guessing no"]) ]) ]) if test "$gl_cv_func_memmem_works_always" != yes; then REPLACE_MEMMEM=1 fi fi gl_PREREQ_MEMMEM ]) # gl_FUNC_MEMMEM_SIMPLE dnl Additionally, check that memmem has linear performance characteristics AC_DEFUN([gl_FUNC_MEMMEM], [ AC_REQUIRE([gl_FUNC_MEMMEM_SIMPLE]) if test $HAVE_DECL_MEMMEM = 1 && test $REPLACE_MEMMEM = 0; then AC_CACHE_CHECK([whether memmem works in linear time], [gl_cv_func_memmem_works_fast], [AC_RUN_IFELSE([AC_LANG_PROGRAM([[ #include /* for signal */ #include /* for memmem */ #include /* for malloc */ #include /* for alarm */ static void quit (int sig) { exit (sig + 128); } ]], [[ int result = 0; size_t m = 1000000; char *haystack = (char *) malloc (2 * m + 1); char *needle = (char *) malloc (m + 1); /* Failure to compile this test due to missing alarm is okay, since all such platforms (mingw) also lack memmem. */ signal (SIGALRM, quit); alarm (5); /* Check for quadratic performance. */ if (haystack && needle) { memset (haystack, 'A', 2 * m); haystack[2 * m] = 'B'; memset (needle, 'A', m); needle[m] = 'B'; if (!memmem (haystack, 2 * m + 1, needle, m + 1)) result |= 1; } return result; ]])], [gl_cv_func_memmem_works_fast=yes], [gl_cv_func_memmem_works_fast=no], [dnl Only glibc >= 2.9 and cygwin > 1.7.0 are known to have a dnl memmem that works in linear time. AC_EGREP_CPP([Lucky user], [ #include #ifdef __GNU_LIBRARY__ #if ((__GLIBC__ == 2 && __GLIBC_MINOR__ >= 9) || (__GLIBC__ > 2)) \ && !defined __UCLIBC__ Lucky user #endif #endif #ifdef __CYGWIN__ #include #if CYGWIN_VERSION_DLL_COMBINED > CYGWIN_VERSION_DLL_MAKE_COMBINED (1007, 0) Lucky user #endif #endif ], [gl_cv_func_memmem_works_fast=yes], [gl_cv_func_memmem_works_fast="guessing no"]) ]) ]) if test "$gl_cv_func_memmem_works_fast" != yes; then REPLACE_MEMMEM=1 fi fi ]) # gl_FUNC_MEMMEM # Prerequisites of lib/memmem.c. AC_DEFUN([gl_PREREQ_MEMMEM], [:]) bfgminer-bfgminer-3.10.0/m4/mmap-anon.m4000066400000000000000000000037251226556647300176470ustar00rootroot00000000000000# mmap-anon.m4 serial 9 dnl Copyright (C) 2005, 2007, 2009-2011 Free Software Foundation, Inc. 
dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # Detect how mmap can be used to create anonymous (not file-backed) memory # mappings. # - On Linux, AIX, OSF/1, Solaris, Cygwin, Interix, Haiku, both MAP_ANONYMOUS # and MAP_ANON exist and have the same value. # - On HP-UX, only MAP_ANONYMOUS exists. # - On MacOS X, FreeBSD, NetBSD, OpenBSD, only MAP_ANON exists. # - On IRIX, neither exists, and a file descriptor opened to /dev/zero must be # used. AC_DEFUN([gl_FUNC_MMAP_ANON], [ dnl Persuade glibc to define MAP_ANONYMOUS. AC_REQUIRE([gl_USE_SYSTEM_EXTENSIONS]) # Check for mmap(). Don't use AC_FUNC_MMAP, because it checks too much: it # fails on HP-UX 11, because MAP_FIXED mappings do not work. But this is # irrelevant for anonymous mappings. AC_CHECK_FUNC([mmap], [gl_have_mmap=yes], [gl_have_mmap=no]) # Try to allow MAP_ANONYMOUS. gl_have_mmap_anonymous=no if test $gl_have_mmap = yes; then AC_MSG_CHECKING([for MAP_ANONYMOUS]) AC_EGREP_CPP([I cant identify this map.], [ #include #ifdef MAP_ANONYMOUS I cant identify this map. #endif ], [gl_have_mmap_anonymous=yes]) if test $gl_have_mmap_anonymous != yes; then AC_EGREP_CPP([I cant identify this map.], [ #include #ifdef MAP_ANON I cant identify this map. #endif ], [AC_DEFINE([MAP_ANONYMOUS], [MAP_ANON], [Define to a substitute value for mmap()'s MAP_ANONYMOUS flag.]) gl_have_mmap_anonymous=yes]) fi AC_MSG_RESULT([$gl_have_mmap_anonymous]) if test $gl_have_mmap_anonymous = yes; then AC_DEFINE([HAVE_MAP_ANONYMOUS], [1], [Define to 1 if mmap()'s MAP_ANONYMOUS flag is available after including config.h and .]) fi fi ]) bfgminer-bfgminer-3.10.0/m4/multiarch.m4000066400000000000000000000036731226556647300177560ustar00rootroot00000000000000# multiarch.m4 serial 6 dnl Copyright (C) 2008-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # Determine whether the compiler is or may be producing universal binaries. # # On MacOS X 10.5 and later systems, the user can create libraries and # executables that work on multiple system types--known as "fat" or # "universal" binaries--by specifying multiple '-arch' options to the # compiler but only a single '-arch' option to the preprocessor. Like # this: # # ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ # CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ # CPP="gcc -E" CXXCPP="g++ -E" # # Detect this situation and set APPLE_UNIVERSAL_BUILD accordingly. AC_DEFUN_ONCE([gl_MULTIARCH], [ dnl Code similar to autoconf-2.63 AC_C_BIGENDIAN. gl_cv_c_multiarch=no AC_COMPILE_IFELSE( [AC_LANG_SOURCE( [[#ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; ]])], [ dnl Check for potential -arch flags. It is not universal unless dnl there are at least two -arch flags with different values. 
arch= prev= for word in ${CC} ${CFLAGS} ${CPPFLAGS} ${LDFLAGS}; do if test -n "$prev"; then case $word in i?86 | x86_64 | ppc | ppc64) if test -z "$arch" || test "$arch" = "$word"; then arch="$word" else gl_cv_c_multiarch=yes fi ;; esac prev= else if test "x$word" = "x-arch"; then prev=arch fi fi done ]) if test $gl_cv_c_multiarch = yes; then APPLE_UNIVERSAL_BUILD=1 else APPLE_UNIVERSAL_BUILD=0 fi AC_SUBST([APPLE_UNIVERSAL_BUILD]) ]) bfgminer-bfgminer-3.10.0/m4/onceonly.m4000066400000000000000000000075461226556647300176170ustar00rootroot00000000000000# onceonly.m4 serial 7 dnl Copyright (C) 2002-2003, 2005-2006, 2008-2011 Free Software Foundation, dnl Inc. dnl This file is free software, distributed under the terms of the GNU dnl General Public License. As a special exception to the GNU General dnl Public License, this file may be distributed as part of a program dnl that contains a configuration script generated by Autoconf, under dnl the same distribution terms as the rest of that program. dnl This file defines some "once only" variants of standard autoconf macros. dnl AC_CHECK_HEADERS_ONCE like AC_CHECK_HEADERS dnl AC_CHECK_FUNCS_ONCE like AC_CHECK_FUNCS dnl AC_CHECK_DECLS_ONCE like AC_CHECK_DECLS dnl AC_REQUIRE([AC_FUNC_STRCOLL]) like AC_FUNC_STRCOLL dnl The advantage is that the check for each of the headers/functions/decls dnl will be put only once into the 'configure' file. It keeps the size of dnl the 'configure' file down, and avoids redundant output when 'configure' dnl is run. dnl The drawback is that the checks cannot be conditionalized. If you write dnl if some_condition; then gl_CHECK_HEADERS(stdlib.h); fi dnl inside an AC_DEFUNed function, the gl_CHECK_HEADERS macro call expands to dnl empty, and the check will be inserted before the body of the AC_DEFUNed dnl function. dnl The original code implemented AC_CHECK_HEADERS_ONCE and AC_CHECK_FUNCS_ONCE dnl in terms of AC_DEFUN and AC_REQUIRE. This implementation uses diversions to dnl named sections DEFAULTS and INIT_PREPARE in order to check all requested dnl headers at once, thus reducing the size of 'configure'. It is known to work dnl with autoconf 2.57..2.62 at least . The size reduction is ca. 9%. dnl Autoconf version 2.59 plus gnulib is required; this file is not needed dnl with Autoconf 2.60 or greater. But note that autoconf's implementation of dnl AC_CHECK_DECLS_ONCE expects a comma-separated list of symbols as first dnl argument! AC_PREREQ([2.59]) # AC_CHECK_HEADERS_ONCE(HEADER1 HEADER2 ...) is a once-only variant of # AC_CHECK_HEADERS(HEADER1 HEADER2 ...). AC_DEFUN([AC_CHECK_HEADERS_ONCE], [ : m4_foreach_w([gl_HEADER_NAME], [$1], [ AC_DEFUN([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME, [./-], [___])), [ m4_divert_text([INIT_PREPARE], [gl_header_list="$gl_header_list gl_HEADER_NAME"]) gl_HEADERS_EXPANSION AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_HEADER_NAME])), [Define to 1 if you have the <]m4_defn([gl_HEADER_NAME])[> header file.]) ]) AC_REQUIRE([gl_CHECK_HEADER_]m4_quote(m4_translit(gl_HEADER_NAME, [./-], [___]))) ]) ]) m4_define([gl_HEADERS_EXPANSION], [ m4_divert_text([DEFAULTS], [gl_header_list=]) AC_CHECK_HEADERS([$gl_header_list]) m4_define([gl_HEADERS_EXPANSION], []) ]) # AC_CHECK_FUNCS_ONCE(FUNC1 FUNC2 ...) is a once-only variant of # AC_CHECK_FUNCS(FUNC1 FUNC2 ...). 
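# Illustrative usage sketch (added commentary, not part of upstream gnulib):
# several modules in this tree call, for example,
#   AC_CHECK_HEADERS_ONCE([sys/mman.h])     (see m4/memchr.m4)
#   AC_CHECK_FUNCS_ONCE([sigaction])        (see m4/sigaction.m4)
# and the generated configure still probes each header/function only once,
# defining HAVE_SYS_MMAN_H / HAVE_SIGACTION at most once.  As explained above,
# such _ONCE checks must not be wrapped in shell conditionals.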
AC_DEFUN([AC_CHECK_FUNCS_ONCE], [ : m4_foreach_w([gl_FUNC_NAME], [$1], [ AC_DEFUN([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME]), [ m4_divert_text([INIT_PREPARE], [gl_func_list="$gl_func_list gl_FUNC_NAME"]) gl_FUNCS_EXPANSION AH_TEMPLATE(AS_TR_CPP([HAVE_]m4_defn([gl_FUNC_NAME])), [Define to 1 if you have the `]m4_defn([gl_FUNC_NAME])[' function.]) ]) AC_REQUIRE([gl_CHECK_FUNC_]m4_defn([gl_FUNC_NAME])) ]) ]) m4_define([gl_FUNCS_EXPANSION], [ m4_divert_text([DEFAULTS], [gl_func_list=]) AC_CHECK_FUNCS([$gl_func_list]) m4_define([gl_FUNCS_EXPANSION], []) ]) # AC_CHECK_DECLS_ONCE(DECL1 DECL2 ...) is a once-only variant of # AC_CHECK_DECLS(DECL1, DECL2, ...). AC_DEFUN([AC_CHECK_DECLS_ONCE], [ : m4_foreach_w([gl_DECL_NAME], [$1], [ AC_DEFUN([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME]), [ AC_CHECK_DECLS(m4_defn([gl_DECL_NAME])) ]) AC_REQUIRE([gl_CHECK_DECL_]m4_defn([gl_DECL_NAME])) ]) ]) bfgminer-bfgminer-3.10.0/m4/sigaction.m4000066400000000000000000000023611226556647300177370ustar00rootroot00000000000000# sigaction.m4 serial 6 dnl Copyright (C) 2008-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # Determine if sigaction interface is present. AC_DEFUN([gl_SIGACTION], [ AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) AC_CHECK_FUNCS_ONCE([sigaction]) if test $ac_cv_func_sigaction = yes; then AC_CHECK_MEMBERS([struct sigaction.sa_sigaction], , , [[#include ]]) if test $ac_cv_member_struct_sigaction_sa_sigaction = no; then HAVE_STRUCT_SIGACTION_SA_SIGACTION=0 fi else HAVE_SIGACTION=0 fi ]) # Prerequisites of the part of lib/signal.in.h and of lib/sigaction.c. AC_DEFUN([gl_PREREQ_SIGACTION], [ AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) AC_REQUIRE([AC_C_RESTRICT]) AC_REQUIRE([AC_TYPE_UID_T]) AC_REQUIRE([gl_PREREQ_SIG_HANDLER_H]) AC_CHECK_FUNCS_ONCE([sigaltstack siginterrupt]) AC_CHECK_TYPES([siginfo_t], [], [], [[ #include ]]) if test $ac_cv_type_siginfo_t = no; then HAVE_SIGINFO_T=0 fi ]) # Prerequisites of lib/sig-handler.h. AC_DEFUN([gl_PREREQ_SIG_HANDLER_H], [ AC_REQUIRE([AC_C_INLINE]) ]) bfgminer-bfgminer-3.10.0/m4/signal_h.m4000066400000000000000000000043331226556647300175440ustar00rootroot00000000000000# signal_h.m4 serial 12 dnl Copyright (C) 2007-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. AC_DEFUN([gl_SIGNAL_H], [ AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) gl_NEXT_HEADERS([signal.h]) # AIX declares sig_atomic_t to already include volatile, and C89 compilers # then choke on 'volatile sig_atomic_t'. C99 requires that it compile. AC_CHECK_TYPE([volatile sig_atomic_t], [], [HAVE_TYPE_VOLATILE_SIG_ATOMIC_T=0], [[ #include ]]) AC_REQUIRE([AC_TYPE_UID_T]) dnl Persuade glibc to define sighandler_t. AC_REQUIRE([AC_USE_SYSTEM_EXTENSIONS]) AC_CHECK_TYPE([sighandler_t], [], [HAVE_SIGHANDLER_T=0], [[ #include ]]) dnl Check for declarations of anything we want to poison if the dnl corresponding gnulib module is not in use. gl_WARN_ON_USE_PREPARE([[#include ]], [sigaction sigaddset sigdelset sigemptyset sigfillset sigismember sigpending sigprocmask]) ]) AC_DEFUN([gl_SIGNAL_MODULE_INDICATOR], [ dnl Use AC_REQUIRE here, so that the default settings are expanded once only. 
AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) gl_MODULE_INDICATOR_SET_VARIABLE([$1]) dnl Define it also as a C macro, for the benefit of the unit tests. gl_MODULE_INDICATOR_FOR_TESTS([$1]) ]) AC_DEFUN([gl_SIGNAL_H_DEFAULTS], [ GNULIB_SIGNAL_H_SIGPIPE=0; AC_SUBST([GNULIB_SIGNAL_H_SIGPIPE]) GNULIB_SIGPROCMASK=0; AC_SUBST([GNULIB_SIGPROCMASK]) GNULIB_SIGACTION=0; AC_SUBST([GNULIB_SIGACTION]) dnl Assume proper GNU behavior unless another module says otherwise. HAVE_POSIX_SIGNALBLOCKING=1; AC_SUBST([HAVE_POSIX_SIGNALBLOCKING]) HAVE_SIGSET_T=1; AC_SUBST([HAVE_SIGSET_T]) HAVE_SIGINFO_T=1; AC_SUBST([HAVE_SIGINFO_T]) HAVE_SIGACTION=1; AC_SUBST([HAVE_SIGACTION]) HAVE_STRUCT_SIGACTION_SA_SIGACTION=1; AC_SUBST([HAVE_STRUCT_SIGACTION_SA_SIGACTION]) HAVE_TYPE_VOLATILE_SIG_ATOMIC_T=1; AC_SUBST([HAVE_TYPE_VOLATILE_SIG_ATOMIC_T]) HAVE_SIGHANDLER_T=1; AC_SUBST([HAVE_SIGHANDLER_T]) ]) bfgminer-bfgminer-3.10.0/m4/signalblocking.m4000066400000000000000000000026561226556647300207540ustar00rootroot00000000000000# signalblocking.m4 serial 11 dnl Copyright (C) 2001-2002, 2006-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # Determine available signal blocking primitives. Three different APIs exist: # 1) POSIX: sigemptyset, sigaddset, sigprocmask # 2) SYSV: sighold, sigrelse # 3) BSD: sigblock, sigsetmask # For simplicity, here we check only for the POSIX signal blocking. AC_DEFUN([gl_SIGNALBLOCKING], [ AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) signals_not_posix= AC_EGREP_HEADER([sigset_t], [signal.h], , [signals_not_posix=1]) if test -z "$signals_not_posix"; then AC_CHECK_FUNC([sigprocmask], [gl_cv_func_sigprocmask=1]) fi if test -z "$gl_cv_func_sigprocmask"; then HAVE_POSIX_SIGNALBLOCKING=0 fi ]) # Prerequisites of the part of lib/signal.in.h and of lib/sigprocmask.c. AC_DEFUN([gl_PREREQ_SIGPROCMASK], [ AC_REQUIRE([gl_SIGNAL_H_DEFAULTS]) AC_CHECK_TYPES([sigset_t], [gl_cv_type_sigset_t=yes], [gl_cv_type_sigset_t=no], [#include /* Mingw defines sigset_t not in , but in . */ #include ]) if test $gl_cv_type_sigset_t != yes; then HAVE_SIGSET_T=0 fi dnl HAVE_SIGSET_T is 1 if the system lacks the sigprocmask function but has dnl the sigset_t type. AC_SUBST([HAVE_SIGSET_T]) ]) bfgminer-bfgminer-3.10.0/m4/stddef_h.m4000066400000000000000000000027551226556647300175460ustar00rootroot00000000000000dnl A placeholder for POSIX 2008 , for platforms that have issues. # stddef_h.m4 serial 4 dnl Copyright (C) 2009-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. 
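dnl Added commentary (not part of upstream gnulib): the compile check below,
dnl   int test[2 * (sizeof NULL == sizeof (void *)) - 1];
dnl is a compile-time assertion -- the array has size 1 when NULL is
dnl pointer-sized and the invalid size -1 otherwise.  A failure makes
dnl configure set REPLACE_NULL=1 and STDDEF_H=stddef.h, so the replacement
dnl header is generated via the GL_GENERATE_STDDEF_H automake conditional.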
AC_DEFUN([gl_STDDEF_H], [ AC_REQUIRE([gl_STDDEF_H_DEFAULTS]) AC_REQUIRE([gt_TYPE_WCHAR_T]) STDDEF_H= if test $gt_cv_c_wchar_t = no; then HAVE_WCHAR_T=0 STDDEF_H=stddef.h fi AC_CACHE_CHECK([whether NULL can be used in arbitrary expressions], [gl_cv_decl_null_works], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include int test[2 * (sizeof NULL == sizeof (void *)) -1]; ]])], [gl_cv_decl_null_works=yes], [gl_cv_decl_null_works=no])]) if test $gl_cv_decl_null_works = no; then REPLACE_NULL=1 STDDEF_H=stddef.h fi AC_SUBST([STDDEF_H]) AM_CONDITIONAL([GL_GENERATE_STDDEF_H], [test -n "$STDDEF_H"]) if test -n "$STDDEF_H"; then gl_NEXT_HEADERS([stddef.h]) fi ]) AC_DEFUN([gl_STDDEF_MODULE_INDICATOR], [ dnl Use AC_REQUIRE here, so that the default settings are expanded once only. AC_REQUIRE([gl_STDDEF_H_DEFAULTS]) gl_MODULE_INDICATOR_SET_VARIABLE([$1]) ]) AC_DEFUN([gl_STDDEF_H_DEFAULTS], [ dnl Assume proper GNU behavior unless another module says otherwise. REPLACE_NULL=0; AC_SUBST([REPLACE_NULL]) HAVE_WCHAR_T=1; AC_SUBST([HAVE_WCHAR_T]) ]) bfgminer-bfgminer-3.10.0/m4/stdint.m4000066400000000000000000000367211226556647300172730ustar00rootroot00000000000000# stdint.m4 serial 41 dnl Copyright (C) 2001-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Paul Eggert and Bruno Haible. dnl Test whether is supported or must be substituted. AC_DEFUN_ONCE([gl_STDINT_H], [ AC_PREREQ([2.59])dnl dnl Check for long long int and unsigned long long int. AC_REQUIRE([AC_TYPE_LONG_LONG_INT]) if test $ac_cv_type_long_long_int = yes; then HAVE_LONG_LONG_INT=1 else HAVE_LONG_LONG_INT=0 fi AC_SUBST([HAVE_LONG_LONG_INT]) AC_REQUIRE([AC_TYPE_UNSIGNED_LONG_LONG_INT]) if test $ac_cv_type_unsigned_long_long_int = yes; then HAVE_UNSIGNED_LONG_LONG_INT=1 else HAVE_UNSIGNED_LONG_LONG_INT=0 fi AC_SUBST([HAVE_UNSIGNED_LONG_LONG_INT]) dnl Check for , in the same way as gl_WCHAR_H does. AC_CHECK_HEADERS_ONCE([wchar.h]) if test $ac_cv_header_wchar_h = yes; then HAVE_WCHAR_H=1 else HAVE_WCHAR_H=0 fi AC_SUBST([HAVE_WCHAR_H]) dnl Check for . dnl AC_INCLUDES_DEFAULT defines $ac_cv_header_inttypes_h. if test $ac_cv_header_inttypes_h = yes; then HAVE_INTTYPES_H=1 else HAVE_INTTYPES_H=0 fi AC_SUBST([HAVE_INTTYPES_H]) dnl Check for . dnl AC_INCLUDES_DEFAULT defines $ac_cv_header_sys_types_h. if test $ac_cv_header_sys_types_h = yes; then HAVE_SYS_TYPES_H=1 else HAVE_SYS_TYPES_H=0 fi AC_SUBST([HAVE_SYS_TYPES_H]) gl_CHECK_NEXT_HEADERS([stdint.h]) if test $ac_cv_header_stdint_h = yes; then HAVE_STDINT_H=1 else HAVE_STDINT_H=0 fi AC_SUBST([HAVE_STDINT_H]) dnl Now see whether we need a substitute . if test $ac_cv_header_stdint_h = yes; then AC_CACHE_CHECK([whether stdint.h conforms to C99], [gl_cv_header_working_stdint_h], [gl_cv_header_working_stdint_h=no AC_COMPILE_IFELSE([ AC_LANG_PROGRAM([[ #define __STDC_LIMIT_MACROS 1 /* to make it work also in C++ mode */ #define __STDC_CONSTANT_MACROS 1 /* to make it work also in C++ mode */ #define _GL_JUST_INCLUDE_SYSTEM_STDINT_H 1 /* work if build isn't clean */ #include /* Dragonfly defines WCHAR_MIN, WCHAR_MAX only in . 
*/ #if !(defined WCHAR_MIN && defined WCHAR_MAX) #error "WCHAR_MIN, WCHAR_MAX not defined in " #endif ] gl_STDINT_INCLUDES [ #ifdef INT8_MAX int8_t a1 = INT8_MAX; int8_t a1min = INT8_MIN; #endif #ifdef INT16_MAX int16_t a2 = INT16_MAX; int16_t a2min = INT16_MIN; #endif #ifdef INT32_MAX int32_t a3 = INT32_MAX; int32_t a3min = INT32_MIN; #endif #ifdef INT64_MAX int64_t a4 = INT64_MAX; int64_t a4min = INT64_MIN; #endif #ifdef UINT8_MAX uint8_t b1 = UINT8_MAX; #else typedef int b1[(unsigned char) -1 != 255 ? 1 : -1]; #endif #ifdef UINT16_MAX uint16_t b2 = UINT16_MAX; #endif #ifdef UINT32_MAX uint32_t b3 = UINT32_MAX; #endif #ifdef UINT64_MAX uint64_t b4 = UINT64_MAX; #endif int_least8_t c1 = INT8_C (0x7f); int_least8_t c1max = INT_LEAST8_MAX; int_least8_t c1min = INT_LEAST8_MIN; int_least16_t c2 = INT16_C (0x7fff); int_least16_t c2max = INT_LEAST16_MAX; int_least16_t c2min = INT_LEAST16_MIN; int_least32_t c3 = INT32_C (0x7fffffff); int_least32_t c3max = INT_LEAST32_MAX; int_least32_t c3min = INT_LEAST32_MIN; int_least64_t c4 = INT64_C (0x7fffffffffffffff); int_least64_t c4max = INT_LEAST64_MAX; int_least64_t c4min = INT_LEAST64_MIN; uint_least8_t d1 = UINT8_C (0xff); uint_least8_t d1max = UINT_LEAST8_MAX; uint_least16_t d2 = UINT16_C (0xffff); uint_least16_t d2max = UINT_LEAST16_MAX; uint_least32_t d3 = UINT32_C (0xffffffff); uint_least32_t d3max = UINT_LEAST32_MAX; uint_least64_t d4 = UINT64_C (0xffffffffffffffff); uint_least64_t d4max = UINT_LEAST64_MAX; int_fast8_t e1 = INT_FAST8_MAX; int_fast8_t e1min = INT_FAST8_MIN; int_fast16_t e2 = INT_FAST16_MAX; int_fast16_t e2min = INT_FAST16_MIN; int_fast32_t e3 = INT_FAST32_MAX; int_fast32_t e3min = INT_FAST32_MIN; int_fast64_t e4 = INT_FAST64_MAX; int_fast64_t e4min = INT_FAST64_MIN; uint_fast8_t f1 = UINT_FAST8_MAX; uint_fast16_t f2 = UINT_FAST16_MAX; uint_fast32_t f3 = UINT_FAST32_MAX; uint_fast64_t f4 = UINT_FAST64_MAX; #ifdef INTPTR_MAX intptr_t g = INTPTR_MAX; intptr_t gmin = INTPTR_MIN; #endif #ifdef UINTPTR_MAX uintptr_t h = UINTPTR_MAX; #endif intmax_t i = INTMAX_MAX; uintmax_t j = UINTMAX_MAX; #include /* for CHAR_BIT */ #define TYPE_MINIMUM(t) \ ((t) ((t) 0 < (t) -1 ? (t) 0 : ~ TYPE_MAXIMUM (t))) #define TYPE_MAXIMUM(t) \ ((t) ((t) 0 < (t) -1 \ ? (t) -1 \ : ((((t) 1 << (sizeof (t) * CHAR_BIT - 2)) - 1) * 2 + 1))) struct s { int check_PTRDIFF: PTRDIFF_MIN == TYPE_MINIMUM (ptrdiff_t) && PTRDIFF_MAX == TYPE_MAXIMUM (ptrdiff_t) ? 1 : -1; /* Detect bug in FreeBSD 6.0 / ia64. */ int check_SIG_ATOMIC: SIG_ATOMIC_MIN == TYPE_MINIMUM (sig_atomic_t) && SIG_ATOMIC_MAX == TYPE_MAXIMUM (sig_atomic_t) ? 1 : -1; int check_SIZE: SIZE_MAX == TYPE_MAXIMUM (size_t) ? 1 : -1; int check_WCHAR: WCHAR_MIN == TYPE_MINIMUM (wchar_t) && WCHAR_MAX == TYPE_MAXIMUM (wchar_t) ? 1 : -1; /* Detect bug in mingw. */ int check_WINT: WINT_MIN == TYPE_MINIMUM (wint_t) && WINT_MAX == TYPE_MAXIMUM (wint_t) ? 1 : -1; /* Detect bugs in glibc 2.4 and Solaris 10 stdint.h, among others. */ int check_UINT8_C: (-1 < UINT8_C (0)) == (-1 < (uint_least8_t) 0) ? 1 : -1; int check_UINT16_C: (-1 < UINT16_C (0)) == (-1 < (uint_least16_t) 0) ? 1 : -1; /* Detect bugs in OpenBSD 3.9 stdint.h. */ #ifdef UINT8_MAX int check_uint8: (uint8_t) -1 == UINT8_MAX ? 1 : -1; #endif #ifdef UINT16_MAX int check_uint16: (uint16_t) -1 == UINT16_MAX ? 1 : -1; #endif #ifdef UINT32_MAX int check_uint32: (uint32_t) -1 == UINT32_MAX ? 1 : -1; #endif #ifdef UINT64_MAX int check_uint64: (uint64_t) -1 == UINT64_MAX ? 1 : -1; #endif int check_uint_least8: (uint_least8_t) -1 == UINT_LEAST8_MAX ? 
1 : -1; int check_uint_least16: (uint_least16_t) -1 == UINT_LEAST16_MAX ? 1 : -1; int check_uint_least32: (uint_least32_t) -1 == UINT_LEAST32_MAX ? 1 : -1; int check_uint_least64: (uint_least64_t) -1 == UINT_LEAST64_MAX ? 1 : -1; int check_uint_fast8: (uint_fast8_t) -1 == UINT_FAST8_MAX ? 1 : -1; int check_uint_fast16: (uint_fast16_t) -1 == UINT_FAST16_MAX ? 1 : -1; int check_uint_fast32: (uint_fast32_t) -1 == UINT_FAST32_MAX ? 1 : -1; int check_uint_fast64: (uint_fast64_t) -1 == UINT_FAST64_MAX ? 1 : -1; int check_uintptr: (uintptr_t) -1 == UINTPTR_MAX ? 1 : -1; int check_uintmax: (uintmax_t) -1 == UINTMAX_MAX ? 1 : -1; int check_size: (size_t) -1 == SIZE_MAX ? 1 : -1; }; ]])], [dnl Determine whether the various *_MIN, *_MAX macros are usable dnl in preprocessor expression. We could do it by compiling a test dnl program for each of these macros. It is faster to run a program dnl that inspects the macro expansion. dnl This detects a bug on HP-UX 11.23/ia64. AC_RUN_IFELSE([ AC_LANG_PROGRAM([[ #define __STDC_LIMIT_MACROS 1 /* to make it work also in C++ mode */ #define __STDC_CONSTANT_MACROS 1 /* to make it work also in C++ mode */ #define _GL_JUST_INCLUDE_SYSTEM_STDINT_H 1 /* work if build isn't clean */ #include ] gl_STDINT_INCLUDES [ #include #include #define MVAL(macro) MVAL1(macro) #define MVAL1(expression) #expression static const char *macro_values[] = { #ifdef INT8_MAX MVAL (INT8_MAX), #endif #ifdef INT16_MAX MVAL (INT16_MAX), #endif #ifdef INT32_MAX MVAL (INT32_MAX), #endif #ifdef INT64_MAX MVAL (INT64_MAX), #endif #ifdef UINT8_MAX MVAL (UINT8_MAX), #endif #ifdef UINT16_MAX MVAL (UINT16_MAX), #endif #ifdef UINT32_MAX MVAL (UINT32_MAX), #endif #ifdef UINT64_MAX MVAL (UINT64_MAX), #endif NULL }; ]], [[ const char **mv; for (mv = macro_values; *mv != NULL; mv++) { const char *value = *mv; /* Test whether it looks like a cast expression. */ if (strncmp (value, "((unsigned int)"/*)*/, 15) == 0 || strncmp (value, "((unsigned short)"/*)*/, 17) == 0 || strncmp (value, "((unsigned char)"/*)*/, 16) == 0 || strncmp (value, "((int)"/*)*/, 6) == 0 || strncmp (value, "((signed short)"/*)*/, 15) == 0 || strncmp (value, "((signed char)"/*)*/, 14) == 0) return mv - macro_values + 1; } return 0; ]])], [gl_cv_header_working_stdint_h=yes], [], [dnl When cross-compiling, assume it works. gl_cv_header_working_stdint_h=yes ]) ]) ]) fi if test "$gl_cv_header_working_stdint_h" = yes; then STDINT_H= else dnl Check for , and for dnl (used in Linux libc4 >= 4.6.7 and libc5). AC_CHECK_HEADERS([sys/inttypes.h sys/bitypes.h]) if test $ac_cv_header_sys_inttypes_h = yes; then HAVE_SYS_INTTYPES_H=1 else HAVE_SYS_INTTYPES_H=0 fi AC_SUBST([HAVE_SYS_INTTYPES_H]) if test $ac_cv_header_sys_bitypes_h = yes; then HAVE_SYS_BITYPES_H=1 else HAVE_SYS_BITYPES_H=0 fi AC_SUBST([HAVE_SYS_BITYPES_H]) gl_STDINT_TYPE_PROPERTIES STDINT_H=stdint.h fi AC_SUBST([STDINT_H]) AM_CONDITIONAL([GL_GENERATE_STDINT_H], [test -n "$STDINT_H"]) ]) dnl gl_STDINT_BITSIZEOF(TYPES, INCLUDES) dnl Determine the size of each of the given types in bits. AC_DEFUN([gl_STDINT_BITSIZEOF], [ dnl Use a shell loop, to avoid bloating configure, and dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into dnl config.h.in, dnl - extra AC_SUBST calls, so that the right substitutions are made. 
m4_foreach_w([gltype], [$1], [AH_TEMPLATE([BITSIZEOF_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]), [Define to the number of bits in type ']gltype['.])]) for gltype in $1 ; do AC_CACHE_CHECK([for bit size of $gltype], [gl_cv_bitsizeof_${gltype}], [AC_COMPUTE_INT([result], [sizeof ($gltype) * CHAR_BIT], [$2 #include ], [result=unknown]) eval gl_cv_bitsizeof_${gltype}=\$result ]) eval result=\$gl_cv_bitsizeof_${gltype} if test $result = unknown; then dnl Use a nonempty default, because some compilers, such as IRIX 5 cc, dnl do a syntax check even on unused #if conditions and give an error dnl on valid C code like this: dnl #if 0 dnl # if > 32 dnl # endif dnl #endif result=0 fi GLTYPE=`echo "$gltype" | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` AC_DEFINE_UNQUOTED([BITSIZEOF_${GLTYPE}], [$result]) eval BITSIZEOF_${GLTYPE}=\$result done m4_foreach_w([gltype], [$1], [AC_SUBST([BITSIZEOF_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]))]) ]) dnl gl_CHECK_TYPES_SIGNED(TYPES, INCLUDES) dnl Determine the signedness of each of the given types. dnl Define HAVE_SIGNED_TYPE if type is signed. AC_DEFUN([gl_CHECK_TYPES_SIGNED], [ dnl Use a shell loop, to avoid bloating configure, and dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into dnl config.h.in, dnl - extra AC_SUBST calls, so that the right substitutions are made. m4_foreach_w([gltype], [$1], [AH_TEMPLATE([HAVE_SIGNED_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]), [Define to 1 if ']gltype[' is a signed integer type.])]) for gltype in $1 ; do AC_CACHE_CHECK([whether $gltype is signed], [gl_cv_type_${gltype}_signed], [AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([$2[ int verify[2 * (($gltype) -1 < ($gltype) 0) - 1];]])], result=yes, result=no) eval gl_cv_type_${gltype}_signed=\$result ]) eval result=\$gl_cv_type_${gltype}_signed GLTYPE=`echo $gltype | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` if test "$result" = yes; then AC_DEFINE_UNQUOTED([HAVE_SIGNED_${GLTYPE}], [1]) eval HAVE_SIGNED_${GLTYPE}=1 else eval HAVE_SIGNED_${GLTYPE}=0 fi done m4_foreach_w([gltype], [$1], [AC_SUBST([HAVE_SIGNED_]m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_]))]) ]) dnl gl_INTEGER_TYPE_SUFFIX(TYPES, INCLUDES) dnl Determine the suffix to use for integer constants of the given types. dnl Define t_SUFFIX for each such type. AC_DEFUN([gl_INTEGER_TYPE_SUFFIX], [ dnl Use a shell loop, to avoid bloating configure, and dnl - extra AH_TEMPLATE calls, so that autoheader knows what to put into dnl config.h.in, dnl - extra AC_SUBST calls, so that the right substitutions are made. 
m4_foreach_w([gltype], [$1], [AH_TEMPLATE(m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_])[_SUFFIX], [Define to l, ll, u, ul, ull, etc., as suitable for constants of type ']gltype['.])]) for gltype in $1 ; do AC_CACHE_CHECK([for $gltype integer literal suffix], [gl_cv_type_${gltype}_suffix], [eval gl_cv_type_${gltype}_suffix=no eval result=\$gl_cv_type_${gltype}_signed if test "$result" = yes; then glsufu= else glsufu=u fi for glsuf in "$glsufu" ${glsufu}l ${glsufu}ll ${glsufu}i64; do case $glsuf in '') gltype1='int';; l) gltype1='long int';; ll) gltype1='long long int';; i64) gltype1='__int64';; u) gltype1='unsigned int';; ul) gltype1='unsigned long int';; ull) gltype1='unsigned long long int';; ui64)gltype1='unsigned __int64';; esac AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([$2[ extern $gltype foo; extern $gltype1 foo;]])], [eval gl_cv_type_${gltype}_suffix=\$glsuf]) eval result=\$gl_cv_type_${gltype}_suffix test "$result" != no && break done]) GLTYPE=`echo $gltype | tr 'abcdefghijklmnopqrstuvwxyz ' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'` eval result=\$gl_cv_type_${gltype}_suffix test "$result" = no && result= eval ${GLTYPE}_SUFFIX=\$result AC_DEFINE_UNQUOTED([${GLTYPE}_SUFFIX], [$result]) done m4_foreach_w([gltype], [$1], [AC_SUBST(m4_translit(gltype,[abcdefghijklmnopqrstuvwxyz ],[ABCDEFGHIJKLMNOPQRSTUVWXYZ_])[_SUFFIX])]) ]) dnl gl_STDINT_INCLUDES AC_DEFUN([gl_STDINT_INCLUDES], [[ /* BSD/OS 4.0.1 has a bug: , and must be included before . */ #include #include #if HAVE_WCHAR_H # include # include # include #endif ]]) dnl gl_STDINT_TYPE_PROPERTIES dnl Compute HAVE_SIGNED_t, BITSIZEOF_t and t_SUFFIX, for all the types t dnl of interest to stdint.in.h. AC_DEFUN([gl_STDINT_TYPE_PROPERTIES], [ AC_REQUIRE([gl_MULTIARCH]) if test $APPLE_UNIVERSAL_BUILD = 0; then gl_STDINT_BITSIZEOF([ptrdiff_t size_t], [gl_STDINT_INCLUDES]) fi gl_STDINT_BITSIZEOF([sig_atomic_t wchar_t wint_t], [gl_STDINT_INCLUDES]) gl_CHECK_TYPES_SIGNED([sig_atomic_t wchar_t wint_t], [gl_STDINT_INCLUDES]) gl_cv_type_ptrdiff_t_signed=yes gl_cv_type_size_t_signed=no if test $APPLE_UNIVERSAL_BUILD = 0; then gl_INTEGER_TYPE_SUFFIX([ptrdiff_t size_t], [gl_STDINT_INCLUDES]) fi gl_INTEGER_TYPE_SUFFIX([sig_atomic_t wchar_t wint_t], [gl_STDINT_INCLUDES]) ]) dnl Autoconf >= 2.61 has AC_COMPUTE_INT built-in. dnl Remove this when we can assume autoconf >= 2.61. m4_ifdef([AC_COMPUTE_INT], [], [ AC_DEFUN([AC_COMPUTE_INT], [_AC_COMPUTE_INT([$2],[$1],[$3],[$4])]) ]) # Hey Emacs! # Local Variables: # indent-tabs-mode: nil # End: bfgminer-bfgminer-3.10.0/m4/string_h.m4000066400000000000000000000123631226556647300175770ustar00rootroot00000000000000# Configure a GNU-like replacement for . # Copyright (C) 2007-2011 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # serial 20 # Written by Paul Eggert. AC_DEFUN([gl_HEADER_STRING_H], [ dnl Use AC_REQUIRE here, so that the default behavior below is expanded dnl once only, before all statements that occur in other macros. AC_REQUIRE([gl_HEADER_STRING_H_BODY]) ]) AC_DEFUN([gl_HEADER_STRING_H_BODY], [ AC_REQUIRE([AC_C_RESTRICT]) AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) gl_NEXT_HEADERS([string.h]) dnl Check for declarations of anything we want to poison if the dnl corresponding gnulib module is not in use, and which is not dnl guaranteed by C89. 
gl_WARN_ON_USE_PREPARE([[#include ]], [memmem mempcpy memrchr rawmemchr stpcpy stpncpy strchrnul strdup strncat strndup strnlen strpbrk strsep strcasestr strtok_r strerror_r strsignal strverscmp]) ]) AC_DEFUN([gl_STRING_MODULE_INDICATOR], [ dnl Use AC_REQUIRE here, so that the default settings are expanded once only. AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) gl_MODULE_INDICATOR_SET_VARIABLE([$1]) dnl Define it also as a C macro, for the benefit of the unit tests. gl_MODULE_INDICATOR_FOR_TESTS([$1]) ]) AC_DEFUN([gl_HEADER_STRING_H_DEFAULTS], [ GNULIB_MEMCHR=0; AC_SUBST([GNULIB_MEMCHR]) GNULIB_MEMMEM=0; AC_SUBST([GNULIB_MEMMEM]) GNULIB_MEMPCPY=0; AC_SUBST([GNULIB_MEMPCPY]) GNULIB_MEMRCHR=0; AC_SUBST([GNULIB_MEMRCHR]) GNULIB_RAWMEMCHR=0; AC_SUBST([GNULIB_RAWMEMCHR]) GNULIB_STPCPY=0; AC_SUBST([GNULIB_STPCPY]) GNULIB_STPNCPY=0; AC_SUBST([GNULIB_STPNCPY]) GNULIB_STRCHRNUL=0; AC_SUBST([GNULIB_STRCHRNUL]) GNULIB_STRDUP=0; AC_SUBST([GNULIB_STRDUP]) GNULIB_STRNCAT=0; AC_SUBST([GNULIB_STRNCAT]) GNULIB_STRNDUP=0; AC_SUBST([GNULIB_STRNDUP]) GNULIB_STRNLEN=0; AC_SUBST([GNULIB_STRNLEN]) GNULIB_STRPBRK=0; AC_SUBST([GNULIB_STRPBRK]) GNULIB_STRSEP=0; AC_SUBST([GNULIB_STRSEP]) GNULIB_STRSTR=0; AC_SUBST([GNULIB_STRSTR]) GNULIB_STRCASESTR=0; AC_SUBST([GNULIB_STRCASESTR]) GNULIB_STRTOK_R=0; AC_SUBST([GNULIB_STRTOK_R]) GNULIB_MBSLEN=0; AC_SUBST([GNULIB_MBSLEN]) GNULIB_MBSNLEN=0; AC_SUBST([GNULIB_MBSNLEN]) GNULIB_MBSCHR=0; AC_SUBST([GNULIB_MBSCHR]) GNULIB_MBSRCHR=0; AC_SUBST([GNULIB_MBSRCHR]) GNULIB_MBSSTR=0; AC_SUBST([GNULIB_MBSSTR]) GNULIB_MBSCASECMP=0; AC_SUBST([GNULIB_MBSCASECMP]) GNULIB_MBSNCASECMP=0; AC_SUBST([GNULIB_MBSNCASECMP]) GNULIB_MBSPCASECMP=0; AC_SUBST([GNULIB_MBSPCASECMP]) GNULIB_MBSCASESTR=0; AC_SUBST([GNULIB_MBSCASESTR]) GNULIB_MBSCSPN=0; AC_SUBST([GNULIB_MBSCSPN]) GNULIB_MBSPBRK=0; AC_SUBST([GNULIB_MBSPBRK]) GNULIB_MBSSPN=0; AC_SUBST([GNULIB_MBSSPN]) GNULIB_MBSSEP=0; AC_SUBST([GNULIB_MBSSEP]) GNULIB_MBSTOK_R=0; AC_SUBST([GNULIB_MBSTOK_R]) GNULIB_STRERROR=0; AC_SUBST([GNULIB_STRERROR]) GNULIB_STRERROR_R=0; AC_SUBST([GNULIB_STRERROR_R]) GNULIB_STRSIGNAL=0; AC_SUBST([GNULIB_STRSIGNAL]) GNULIB_STRVERSCMP=0; AC_SUBST([GNULIB_STRVERSCMP]) HAVE_MBSLEN=0; AC_SUBST([HAVE_MBSLEN]) dnl Assume proper GNU behavior unless another module says otherwise. 
HAVE_MEMCHR=1; AC_SUBST([HAVE_MEMCHR]) HAVE_DECL_MEMMEM=1; AC_SUBST([HAVE_DECL_MEMMEM]) HAVE_MEMPCPY=1; AC_SUBST([HAVE_MEMPCPY]) HAVE_DECL_MEMRCHR=1; AC_SUBST([HAVE_DECL_MEMRCHR]) HAVE_RAWMEMCHR=1; AC_SUBST([HAVE_RAWMEMCHR]) HAVE_STPCPY=1; AC_SUBST([HAVE_STPCPY]) HAVE_STPNCPY=1; AC_SUBST([HAVE_STPNCPY]) HAVE_STRCHRNUL=1; AC_SUBST([HAVE_STRCHRNUL]) HAVE_DECL_STRDUP=1; AC_SUBST([HAVE_DECL_STRDUP]) HAVE_DECL_STRNDUP=1; AC_SUBST([HAVE_DECL_STRNDUP]) HAVE_DECL_STRNLEN=1; AC_SUBST([HAVE_DECL_STRNLEN]) HAVE_STRPBRK=1; AC_SUBST([HAVE_STRPBRK]) HAVE_STRSEP=1; AC_SUBST([HAVE_STRSEP]) HAVE_STRCASESTR=1; AC_SUBST([HAVE_STRCASESTR]) HAVE_DECL_STRTOK_R=1; AC_SUBST([HAVE_DECL_STRTOK_R]) HAVE_DECL_STRERROR_R=1; AC_SUBST([HAVE_DECL_STRERROR_R]) HAVE_DECL_STRSIGNAL=1; AC_SUBST([HAVE_DECL_STRSIGNAL]) HAVE_STRVERSCMP=1; AC_SUBST([HAVE_STRVERSCMP]) REPLACE_MEMCHR=0; AC_SUBST([REPLACE_MEMCHR]) REPLACE_MEMMEM=0; AC_SUBST([REPLACE_MEMMEM]) REPLACE_STPNCPY=0; AC_SUBST([REPLACE_STPNCPY]) REPLACE_STRDUP=0; AC_SUBST([REPLACE_STRDUP]) REPLACE_STRSTR=0; AC_SUBST([REPLACE_STRSTR]) REPLACE_STRCASESTR=0; AC_SUBST([REPLACE_STRCASESTR]) REPLACE_STRCHRNUL=0; AC_SUBST([REPLACE_STRCHRNUL]) REPLACE_STRERROR=0; AC_SUBST([REPLACE_STRERROR]) REPLACE_STRERROR_R=0; AC_SUBST([REPLACE_STRERROR_R]) REPLACE_STRNCAT=0; AC_SUBST([REPLACE_STRNCAT]) REPLACE_STRNDUP=0; AC_SUBST([REPLACE_STRNDUP]) REPLACE_STRNLEN=0; AC_SUBST([REPLACE_STRNLEN]) REPLACE_STRSIGNAL=0; AC_SUBST([REPLACE_STRSIGNAL]) REPLACE_STRTOK_R=0; AC_SUBST([REPLACE_STRTOK_R]) UNDEFINE_STRTOK_R=0; AC_SUBST([UNDEFINE_STRTOK_R]) ]) bfgminer-bfgminer-3.10.0/m4/strtok_r.m4000066400000000000000000000050221226556647300176230ustar00rootroot00000000000000# strtok_r.m4 serial 12 dnl Copyright (C) 2002-2004, 2006-2007, 2009-2011 Free Software Foundation, dnl Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. AC_DEFUN([gl_FUNC_STRTOK_R], [ dnl The strtok_r() declaration in lib/string.in.h uses 'restrict'. AC_REQUIRE([AC_C_RESTRICT]) AC_REQUIRE([gl_HEADER_STRING_H_DEFAULTS]) AC_REQUIRE([AC_CANONICAL_HOST]) dnl for cross-compiles AC_CHECK_FUNCS([strtok_r]) if test $ac_cv_func_strtok_r = yes; then dnl glibc 2.7 has a bug in strtok_r that causes a segmentation fault dnl when the second argument to strtok_r is a constant string that has dnl exactly one byte and compiling with optimization. This bug is, for dnl example, present in the glibc 2.7-18 package in Debian "lenny". dnl See . AC_CACHE_CHECK([whether strtok_r works], [gl_cv_func_strtok_r_works], [AC_RUN_IFELSE( [AC_LANG_PROGRAM([[ #ifndef __OPTIMIZE__ # define __OPTIMIZE__ 1 #endif #undef __OPTIMIZE_SIZE__ #undef __NO_INLINE__ #include #include ]], [[static const char dummy[] = "\177\01a"; char delimiters[] = "xxxxxxxx"; char *save_ptr = (char *) dummy; strtok_r (delimiters, "x", &save_ptr); strtok_r (NULL, "x", &save_ptr); return 0; ]]) ], [gl_cv_func_strtok_r_works=yes], [gl_cv_func_strtok_r_works=no], [ changequote(,)dnl case "$host_os" in # Guess no on glibc systems. *-gnu*) gl_cv_func_strtok_r_works="guessing no";; *) gl_cv_func_strtok_r_works="guessing yes";; esac changequote([,])dnl ]) ]) case "$gl_cv_func_strtok_r_works" in *no) dnl We could set REPLACE_STRTOK_R=1 and AC_LIBOBJ([strtok_r]) here, dnl but it's only the macro version in which is wrong. dnl The code compiled into libc is fine. 
UNDEFINE_STRTOK_R=1 ;; esac else AC_LIBOBJ([strtok_r]) gl_PREREQ_STRTOK_R fi AC_CHECK_DECLS_ONCE([strtok_r]) if test $ac_cv_have_decl_strtok_r = no; then HAVE_DECL_STRTOK_R=0 fi ]) # Prerequisites of lib/strtok_r.c. AC_DEFUN([gl_PREREQ_STRTOK_R], [ : ]) bfgminer-bfgminer-3.10.0/m4/warn-on-use.m4000066400000000000000000000040271226556647300201330ustar00rootroot00000000000000# warn-on-use.m4 serial 2 dnl Copyright (C) 2010-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. # gl_WARN_ON_USE_PREPARE(INCLUDES, NAMES) # --------------------------------------- # For each whitespace-separated element in the list of NAMES, define # HAVE_RAW_DECL_name if the function has a declaration among INCLUDES # even after being undefined as a macro. # # See warn-on-use.h for some hints on how to poison function names, as # well as ideas on poisoning global variables and macros. NAMES may # include global variables, but remember that only functions work with # _GL_WARN_ON_USE. Typically, INCLUDES only needs to list a single # header, but if the replacement header pulls in other headers because # some systems declare functions in the wrong header, then INCLUDES # should do likewise. # # If you assume C89, then it is generally safe to assume declarations # for functions declared in that standard (such as gets) without # needing gl_WARN_ON_USE_PREPARE. AC_DEFUN([gl_WARN_ON_USE_PREPARE], [ m4_foreach_w([gl_decl], [$2], [AH_TEMPLATE([HAVE_RAW_DECL_]AS_TR_CPP(m4_defn([gl_decl])), [Define to 1 if ]m4_defn([gl_decl])[ is declared even after undefining macros.])])dnl for gl_func in m4_flatten([$2]); do AS_VAR_PUSHDEF([gl_Symbol], [gl_cv_have_raw_decl_$gl_func])dnl AC_CACHE_CHECK([whether $gl_func is declared without a macro], gl_Symbol, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([$1], [@%:@undef $gl_func (void) $gl_func;])], [AS_VAR_SET(gl_Symbol, [yes])], [AS_VAR_SET(gl_Symbol, [no])])]) AS_VAR_IF(gl_Symbol, [yes], [AC_DEFINE_UNQUOTED(AS_TR_CPP([HAVE_RAW_DECL_$gl_func]), [1]) dnl shortcut - if the raw declaration exists, then set a cache dnl variable to allow skipping any later AC_CHECK_DECL efforts eval ac_cv_have_decl_$gl_func=yes]) AS_VAR_POPDEF([gl_Symbol])dnl done ]) bfgminer-bfgminer-3.10.0/m4/wchar_t.m4000066400000000000000000000014621226556647300174070ustar00rootroot00000000000000# wchar_t.m4 serial 4 (gettext-0.18.2) dnl Copyright (C) 2002-2003, 2008-2011 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. dnl Test whether has the 'wchar_t' type. 
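dnl Added commentary (not part of upstream gettext/gnulib): gt_TYPE_WCHAR_T
dnl defines HAVE_WCHAR_T in config.h when the type is available, so C code
dnl can guard on it, e.g.
dnl   #ifdef HAVE_WCHAR_T
dnl   wchar_t buf[16];
dnl   #endif
dnl The gl_STDDEF_H macro in m4/stddef_h.m4 also consumes the gt_cv_c_wchar_t
dnl cache variable set here: when the type is missing, it sets HAVE_WCHAR_T=0
dnl and arranges for a replacement stddef.h to be generated.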
dnl Prerequisite: AC_PROG_CC AC_DEFUN([gt_TYPE_WCHAR_T], [ AC_CACHE_CHECK([for wchar_t], [gt_cv_c_wchar_t], [AC_COMPILE_IFELSE( [AC_LANG_PROGRAM( [[#include wchar_t foo = (wchar_t)'\0';]], [[]])], [gt_cv_c_wchar_t=yes], [gt_cv_c_wchar_t=no])]) if test $gt_cv_c_wchar_t = yes; then AC_DEFINE([HAVE_WCHAR_T], [1], [Define if you have the 'wchar_t' type.]) fi ]) bfgminer-bfgminer-3.10.0/make-release000077500000000000000000000055571226556647300174700ustar00rootroot00000000000000#!/bin/bash # Copyright 2012-2013 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. See COPYING for more details. set -e set -x tag="$1"; shift [ -n "$tag" ] || exit 1 sw="$1"; shift || true [ -n "$sw" ] || sw="$tag" test -n "$DEBUG_RELEASE" || DEBUG_RELEASE=1 builds=(win32 win64) win32_machine='i686-pc-mingw32' win32_CFLAGS='-march=i686' win64_machine='x86_64-w64-mingw32' win64_CFLAGS='' IDIR="$PWD" OUTDIR="$PWD" TMPROOT="$PWD/make-release-tmp" TMPDIR="${TMPROOT}/${sw}-tmp" mkdir -vp "$TMPDIR" # Source release git branch TMP "$tag" git clone . "$TMPDIR" -b TMP --depth 1 git branch -D TMP cd "$TMPDIR" git submodule update --init { git archive --prefix "$sw"/ --format tar "$tag" git submodule --quiet foreach --recursive 'git archive --prefix "'"$sw"'/$path/" --format tar HEAD' } | tar -xivp cd "$sw" NOSUBMODULES=1 \ NOCONFIGURE=1 \ ./autogen.sh find . -name autom4te.cache | xargs rm -r cd .. zip -r "$OUTDIR/${sw}.zip" "$sw" tar cjvpf "$OUTDIR/${sw}.tbz2" "$sw" SRCDIR="$TMPDIR/$sw" dlls=' backtrace.dll pdcurses.dll libcurl-4.dll libevent-2-0-5.dll libhidapi-0.dll pthreadGC2.dll libjansson-4.dll libusb-1.0.dll zlib1.dll ' libmicrohttpd_dlls=' libmicrohttpd-10.dll libplibc-1.dll ' docs=' AUTHORS COPYING NEWS README README.ASIC README.FPGA README.GPU README.RPC README.scrypt ' for build in "${builds[@]}"; do PKGNAME="${sw}-${build}" PKGDIR="$TMPDIR/$PKGNAME" cd "$TMPDIR" mkdir -vp "$PKGDIR" for v in machine CFLAGS; do eval "${v}"="'$(eval echo "\${${build}_${v}}")'" done if test "x$DEBUG_RELEASE" = "x1"; then CFLAGS="${CFLAGS} -g" fi for doc in $docs; do sed 's/$/\r/' <"$doc" >"$PKGDIR/${doc}.txt" done NOCONFIGURE=1 \ ./autogen.sh ./configure \ --prefix='C:\\Program Files\\BFGMiner\\' \ CFLAGS="${CFLAGS} -Wall" \ --disable-cpumining \ --enable-opencl \ --enable-adl \ --enable-bitforce \ --enable-icarus \ --enable-modminer \ --enable-ztex \ --enable-scrypt \ --host="$machine" make $MAKEOPTS if test "x$DEBUG_RELEASE" != "x1"; then "$machine"-strip \ libblkmaker/.libs/*.dll \ *.exe fi cp -v \ *.exe \ libblkmaker/.libs/*.dll \ *.cl \ example.conf \ windows-build.txt \ miner.php \ "$PKGDIR/" mkdir "$PKGDIR/bitstreams" mydlls="$dlls" if "${machine}-objdump" -p bfgminer.exe | grep -q "DLL Name: libmicrohttpd"; then mydlls="$mydlls $libmicrohttpd_dlls" fi for dll in $mydlls; do libdir="/usr/$machine/usr/lib" [ -e "$libdir/$dll" ] || libdir="/usr/$machine/usr/bin" [ -e "$libdir/$dll" ] || continue cp -v -L "$libdir/$dll" "$PKGDIR" "$machine"-strip "$PKGDIR/$dll" done make clean cd "$PKGDIR/.." 
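# (Added comment.) The zip is created from the parent directory so that the
# per-build archive unpacks into a single "$PKGNAME" folder, mirroring the
# layout of the source .zip/.tbz2 produced earlier in this script.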
zip -r "$OUTDIR/$PKGNAME.zip" "$PKGNAME" done cd "$IDIR" bfgminer-bfgminer-3.10.0/mcp2210.c000066400000000000000000000172761226556647300164400ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include "logging.h" #include "lowlevel.h" #include "lowl-hid.h" #include "miner.h" #include "mcp2210.h" #define MCP2210_IDVENDOR 0x04d8 #define MCP2210_IDPRODUCT 0x00de static bool _mcp2210_devinfo_scan_cb(struct lowlevel_device_info * const usbinfo, void * const userp) { struct lowlevel_device_info **devinfo_list_p = userp, *info; info = malloc(sizeof(*info)); *info = (struct lowlevel_device_info){ .lowl = &lowl_mcp2210, }; lowlevel_devinfo_semicpy(info, usbinfo); LL_PREPEND(*devinfo_list_p, info); // Never *consume* the lowl_usb entry - especially since this is during the scan! return false; } static struct lowlevel_device_info *mcp2210_devinfo_scan() { struct lowlevel_device_info *devinfo_list = NULL; lowlevel_detect_id(_mcp2210_devinfo_scan_cb, &devinfo_list, &lowl_hid, MCP2210_IDVENDOR, MCP2210_IDPRODUCT); return devinfo_list; } struct mcp2210_device { hid_device *hid; // http://ww1.microchip.com/downloads/en/DeviceDoc/22288A.pdf pg 34 uint8_t cfg_spi[0x11]; // http://ww1.microchip.com/downloads/en/DeviceDoc/22288A.pdf pg 40 uint8_t cfg_gpio[0xf]; }; static bool mcp2210_io(hid_device * const hid, uint8_t * const cmd, uint8_t * const buf) { char hexcmd[(0x41 * 2) + 1]; if (opt_dev_protocol) bin2hex(hexcmd, cmd, 0x41); const bool rv = likely( 0x41 == hid_write(hid, cmd, 0x41) && 64 == hid_read(hid, buf, 64) ); if (opt_dev_protocol) { char hexbuf[(0x40 * 2) + 1]; bin2hex(hexbuf, buf, 0x40); applog(LOG_DEBUG, "mcp2210_io(%p, %s, %s)", hid, hexcmd, hexbuf); } return rv; } static bool mcp2210_get_configs(struct mcp2210_device * const h) { hid_device * const hid = h->hid; uint8_t cmd[0x41] = {0,0x41}, buf[0x40]; if (!mcp2210_io(hid, cmd, buf)) { applog(LOG_ERR, "%s: Failed to get current %s config", __func__, "SPI"); return false; } memcpy(h->cfg_spi, &buf[4], sizeof(h->cfg_spi)); cmd[1] = 0x20; if (!mcp2210_io(hid, cmd, buf)) { applog(LOG_ERR, "%s: Failed to get current %s config", __func__, "GPIO"); return false; } memcpy(h->cfg_gpio, &buf[4], sizeof(h->cfg_gpio)); return true; } struct mcp2210_device *mcp2210_open(const struct lowlevel_device_info * const info) { struct mcp2210_device *h; char * const path = info->path; hid_device * const hid = hid_open_path(path); if (unlikely(!hid)) return NULL; h = malloc(sizeof(*h)); h->hid = hid; if (!mcp2210_get_configs(h)) goto fail; return h; fail: free(h); return NULL; } void mcp2210_close(struct mcp2210_device * const h) { hid_close(h->hid); free(h); } static bool mcp2210_set_cfg_spi(struct mcp2210_device * const h) { hid_device * const hid = h->hid; uint8_t cmd[0x41] = {0,0x40}, buf[0x40]; memcpy(&cmd[5], h->cfg_spi, sizeof(h->cfg_spi)); if (!mcp2210_io(hid, cmd, buf)) { applog(LOG_ERR, "%s: Failed to set current %s config", __func__, "SPI"); return false; } if (buf[1] != 0) { applog(LOG_ERR, "%s: Error setting current %s config (%d)", __func__, "SPI", buf[1]); return false; } return true; } bool mcp2210_configure_spi(struct mcp2210_device * const h, const uint32_t bitrate, const uint16_t 
idlechipsel, const uint16_t activechipsel, const uint16_t chipseltodatadelay, const uint16_t lastbytetocsdelay, const uint16_t midbytedelay) { uint8_t * const cfg = h->cfg_spi; cfg[0] = (bitrate >> 0x00) & 0xff; cfg[1] = (bitrate >> 0x08) & 0xff; cfg[2] = (bitrate >> 0x10) & 0xff; cfg[3] = (bitrate >> 0x18) & 0xff; cfg[4] = ( idlechipsel >> 0) & 0xff; cfg[5] = ( idlechipsel >> 8) & 0xff; cfg[6] = (activechipsel >> 0) & 0xff; cfg[7] = (activechipsel >> 8) & 0xff; cfg[8] = (chipseltodatadelay >> 0) & 0xff; cfg[9] = (chipseltodatadelay >> 8) & 0xff; cfg[0xa] = (lastbytetocsdelay >> 0) & 0xff; cfg[0xb] = (lastbytetocsdelay >> 8) & 0xff; cfg[0xc] = (midbytedelay >> 0) & 0xff; cfg[0xd] = (midbytedelay >> 8) & 0xff; return mcp2210_set_cfg_spi(h); } bool mcp2210_set_spimode(struct mcp2210_device * const h, const uint8_t spimode) { uint8_t * const cfg = h->cfg_spi; cfg[0x10] = spimode; return mcp2210_set_cfg_spi(h); } bool mcp2210_spi_transfer(struct mcp2210_device * const h, const void * const tx, void * const rx, uint8_t sz) { hid_device * const hid = h->hid; uint8_t * const cfg = h->cfg_spi; uint8_t cmd[0x41] = {0,0x42}, buf[0x40]; uint8_t *p = rx; if (unlikely(sz > 60)) { applog(LOG_ERR, "%s: SPI transfer too long (%d bytes)", __func__, sz); return false; } cfg[0xe] = sz; cfg[0xf] = 0; if (!mcp2210_set_cfg_spi(h)) return false; cmd[2] = sz; memcpy(&cmd[5], tx, sz); if (unlikely(!mcp2210_io(hid, cmd, buf))) { applog(LOG_ERR, "%s: Failed to issue SPI transfer", __func__); return false; } while (true) { switch (buf[1]) { case 0: // accepted cmd[2] = 0; break; case 0xf8: // transfer in progress applog(LOG_DEBUG, "%s: SPI transfer rejected temporarily (%d bytes remaining)", __func__, sz); cgsleep_ms(20); goto retry; default: applog(LOG_ERR, "%s: SPI transfer error (%d) (%d bytes remaining)", __func__, buf[1], sz); return false; } if (buf[2] >= sz) { if (buf[2] > sz) applog(LOG_WARNING, "%s: Received %d extra bytes in SPI transfer", __func__, sz - buf[2]); memcpy(p, &buf[4], sz); return true; } memcpy(p, &buf[4], buf[2]); p += buf[2]; sz -= buf[2]; retry: if (unlikely(!mcp2210_io(hid, cmd, buf))) { applog(LOG_ERR, "%s: Failed to continue SPI transfer (%d bytes remaining)", __func__, sz); return false; } } } bool mcp2210_spi_cancel(struct mcp2210_device * const h) { hid_device * const hid = h->hid; uint8_t cmd[0x41] = {0,0x11}, buf[0x40]; if (!mcp2210_io(hid, cmd, buf)) return false; return (buf[1] == 0); } static bool mcp2210_set_cfg_gpio(struct mcp2210_device * const h) { hid_device * const hid = h->hid; uint8_t cmd[0x41] = {0,0x21}, buf[0x40]; // NOTE: NVRAM chip params access control is not set here memcpy(&cmd[5], h->cfg_gpio, 0xe); if (!mcp2210_io(hid, cmd, buf)) { applog(LOG_ERR, "%s: Failed to set current %s config", __func__, "GPIO"); return false; } if (buf[1] != 0) { applog(LOG_ERR, "%s: Error setting current %s config (%d)", __func__, "GPIO", buf[1]); return false; } return true; } bool mcp2210_set_gpio_output(struct mcp2210_device * const h, const int pin, const enum mcp2210_gpio_value d) { const int bit = 1 << (pin % 8); const int byte = (pin / 8); // Set pin to GPIO mode h->cfg_gpio[pin] = 0; // Set GPIO to output mode h->cfg_gpio[byte + 0xb] &= ~bit; // Set value for GPIO output if (d == MGV_HIGH) h->cfg_gpio[byte + 9] |= bit; else h->cfg_gpio[byte + 9] &= ~bit; return mcp2210_set_cfg_gpio(h); } enum mcp2210_gpio_value mcp2210_get_gpio_input(struct mcp2210_device * const h, const int pin) { hid_device * const hid = h->hid; uint8_t cmd[0x41] = {0,0x31}, buf[0x40]; const int bit = 1 << 
(pin % 8); const int byte = (pin / 8); // Set pin to GPIO mode h->cfg_gpio[pin] = 0; // Set GPIO to input mode h->cfg_gpio[byte + 0xb] |= bit; if (!mcp2210_set_cfg_gpio(h)) return MGV_ERROR; if (!mcp2210_io(hid, cmd, buf)) { applog(LOG_ERR, "%s: Failed to get current GPIO input values", __func__); return MGV_ERROR; } if (buf[byte + 4] & bit) return MGV_HIGH; else return MGV_LOW; } struct lowlevel_driver lowl_mcp2210 = { .dname = "mcp2210", .devinfo_scan = mcp2210_devinfo_scan, }; bfgminer-bfgminer-3.10.0/mcp2210.h000066400000000000000000000017421226556647300164340ustar00rootroot00000000000000#ifndef BFG_MCP2210_H #define BFG_MCP2210_H #include #include enum mcp2210_gpio_direction { MGD_OUTPUT, MGD_INPUT, }; enum mcp2210_gpio_value { MGV_LOW, MGV_HIGH, MGV_ERROR, }; struct mcp2210_device; extern struct mcp2210_device *mcp2210_open(const struct lowlevel_device_info *); extern void mcp2210_close(struct mcp2210_device *); extern bool mcp2210_spi_cancel(struct mcp2210_device *); extern bool mcp2210_configure_spi(struct mcp2210_device *, uint32_t bitrate, uint16_t idlechipsel, uint16_t activechipsel, uint16_t chipseltodatadelay, uint16_t lastbytetocsdelay, uint16_t midbytedelay); extern bool mcp2210_set_spimode(struct mcp2210_device *, uint8_t spimode); extern bool mcp2210_spi_transfer(struct mcp2210_device *, const void *tx, void *rx, uint8_t sz); extern bool mcp2210_set_gpio_output(struct mcp2210_device *, int pin, enum mcp2210_gpio_value); extern enum mcp2210_gpio_value mcp2210_get_gpio_input(struct mcp2210_device *, int pin); #endif bfgminer-bfgminer-3.10.0/miner.c000066400000000000000000011313201226556647300164520ustar00rootroot00000000000000/* * Copyright 2011-2013 Con Kolivas * Copyright 2011-2013 Luke Dashjr * Copyright 2012-2013 Andrew Smith * Copyright 2010 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. 
*/ #include "config.h" #ifdef HAVE_CURSES #ifdef USE_UNICODE #define PDC_WIDE #endif // Must be before stdbool, since pdcurses typedefs bool :/ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_PWD_H #include #endif #ifndef WIN32 #include #include #else #include #include #endif #include #include #include #include #include #include #include #include #include #include "compat.h" #include "deviceapi.h" #include "logging.h" #include "miner.h" #include "findnonce.h" #include "adl.h" #include "driver-cpu.h" #include "driver-opencl.h" #include "bench_block.h" #include "scrypt.h" #ifdef USE_AVALON #include "driver-avalon.h" #endif #ifdef HAVE_BFG_LOWLEVEL #include "lowlevel.h" #endif #if defined(unix) || defined(__APPLE__) #include #include #include #endif #ifdef USE_SCRYPT #include "scrypt.h" #endif #if defined(USE_AVALON) || defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_MODMINER) || defined(USE_NANOFURY) || defined(USE_X6500) || defined(USE_ZTEX) # define USE_FPGA #endif struct strategies strategies[] = { { "Failover" }, { "Round Robin" }, { "Rotate" }, { "Load Balance" }, { "Balance" }, }; static char packagename[256]; bool opt_protocol; bool opt_dev_protocol; static bool opt_benchmark; static bool want_longpoll = true; static bool want_gbt = true; static bool want_getwork = true; #if BLKMAKER_VERSION > 1 struct _cbscript_t { char *data; size_t sz; }; static struct _cbscript_t opt_coinbase_script; static uint32_t template_nonce; #endif #if BLKMAKER_VERSION > 0 char *opt_coinbase_sig; #endif char *request_target_str; float request_pdiff = 1.0; double request_bdiff; static bool want_stratum = true; bool have_longpoll; int opt_skip_checks; bool want_per_device_stats; bool use_syslog; bool opt_quiet_work_updates; bool opt_quiet; bool opt_realquiet; int loginput_size; bool opt_compact; bool opt_show_procs; const int opt_cutofftemp = 95; int opt_hysteresis = 3; static int opt_retries = -1; int opt_fail_pause = 5; int opt_log_interval = 5; int opt_queue = 1; int opt_scantime = 60; int opt_expiry = 120; int opt_expiry_lp = 3600; int opt_bench_algo = -1; unsigned long long global_hashrate; static bool opt_unittest = false; unsigned long global_quota_gcd = 1; #ifdef HAVE_OPENCL int opt_dynamic_interval = 7; int nDevs; int opt_g_threads = -1; #endif #ifdef USE_SCRYPT static char detect_algo = 1; bool opt_scrypt; #else static char detect_algo; #endif bool opt_restart = true; #ifdef USE_LIBMICROHTTPD #include "httpsrv.h" int httpsrv_port = -1; #endif #ifdef USE_LIBEVENT int stratumsrv_port = -1; #endif struct string_elist *scan_devices; static struct string_elist *opt_set_device_list; bool opt_force_dev_init; static struct string_elist *opt_devices_enabled_list; static bool opt_display_devs; int total_devices; struct cgpu_info **devices; int total_devices_new; struct cgpu_info **devices_new; bool have_opencl; int opt_n_threads = -1; int mining_threads; int num_processors; #ifdef HAVE_CURSES bool use_curses = true; #else bool use_curses; #endif #ifdef HAVE_LIBUSB bool have_libusb; #endif static bool opt_submit_stale = true; static int opt_shares; static int opt_submit_threads = 0x40; bool opt_fail_only; bool opt_autofan; bool opt_autoengine; bool opt_noadl; char *opt_api_allow = NULL; char *opt_api_groups; char *opt_api_description = PACKAGE_STRING; int opt_api_port = 4028; bool opt_api_listen; bool opt_api_mcast; char *opt_api_mcast_addr = 
API_MCAST_ADDR; char *opt_api_mcast_code = API_MCAST_CODE; char *opt_api_mcast_des = ""; int opt_api_mcast_port = 4028; bool opt_api_network; bool opt_delaynet; bool opt_disable_pool; static bool no_work; char *opt_icarus_options = NULL; char *opt_icarus_timing = NULL; bool opt_worktime; bool opt_weighed_stats; #ifdef USE_AVALON char *opt_avalon_options = NULL; #endif #ifdef USE_KLONDIKE char *opt_klondike_options = NULL; #endif char *opt_kernel_path; char *cgminer_path; #if defined(USE_BITFORCE) bool opt_bfl_noncerange; #endif #define QUIET (opt_quiet || opt_realquiet) struct thr_info *control_thr; struct thr_info **mining_thr; static int gwsched_thr_id; static int watchpool_thr_id; static int watchdog_thr_id; #ifdef HAVE_CURSES static int input_thr_id; #endif int gpur_thr_id; static int api_thr_id; static int total_control_threads; pthread_mutex_t hash_lock; static pthread_mutex_t *stgd_lock; pthread_mutex_t console_lock; cglock_t ch_lock; static pthread_rwlock_t blk_lock; static pthread_mutex_t sshare_lock; pthread_rwlock_t netacc_lock; pthread_rwlock_t mining_thr_lock; pthread_rwlock_t devices_lock; static pthread_mutex_t lp_lock; static pthread_cond_t lp_cond; pthread_cond_t gws_cond; bool shutting_down; double total_rolling; double total_mhashes_done; static struct timeval total_tv_start, total_tv_end; static struct timeval miner_started; cglock_t control_lock; pthread_mutex_t stats_lock; static pthread_mutex_t submitting_lock; static int total_submitting; static struct work *submit_waiting; notifier_t submit_waiting_notifier; int hw_errors; int total_accepted, total_rejected, total_diff1; int total_bad_nonces; int total_getworks, total_stale, total_discarded; uint64_t total_bytes_rcvd, total_bytes_sent; double total_diff_accepted, total_diff_rejected, total_diff_stale; static int staged_rollable; unsigned int new_blocks; unsigned int found_blocks; unsigned int local_work; unsigned int total_go, total_ro; struct pool **pools; static struct pool *currentpool = NULL; int total_pools, enabled_pools; enum pool_strategy pool_strategy = POOL_FAILOVER; int opt_rotate_period; static int total_urls, total_users, total_passes; static #ifndef HAVE_CURSES const #endif bool curses_active; #ifdef HAVE_CURSES #if !(defined(PDCURSES) || defined(NCURSES_VERSION)) const #endif short default_bgcolor = COLOR_BLACK; static int attr_title = A_BOLD; #endif static #if defined(HAVE_CURSES) && defined(USE_UNICODE) bool use_unicode; static bool have_unicode_degrees; static wchar_t unicode_micro = 'u'; #else const bool use_unicode; static const bool have_unicode_degrees; static const char unicode_micro = 'u'; #endif #ifdef HAVE_CURSES #define U8_BAD_START "\xef\x80\x81" #define U8_BAD_END "\xef\x80\x80" #define AS_BAD(x) U8_BAD_START x U8_BAD_END bool selecting_device; unsigned selected_device; #endif static char current_block[40]; /* Protected by ch_lock */ static char *current_hash; static uint32_t current_block_id; char *current_fullhash; static char datestamp[40]; static char blocktime[32]; time_t block_time; static char best_share[8] = "0"; double current_diff = 0xFFFFFFFFFFFFFFFFULL; static char block_diff[8]; static char net_hashrate[10]; uint64_t best_diff = 0; static bool known_blkheight_current; static uint32_t known_blkheight; static uint32_t known_blkheight_blkid; static uint64_t block_subsidy; struct block { char hash[40]; UT_hash_handle hh; int block_no; }; static struct block *blocks = NULL; int swork_id; /* For creating a hash database of stratum shares submitted that have not had * a response 
yet */ struct stratum_share { UT_hash_handle hh; bool block; struct work *work; int id; }; static struct stratum_share *stratum_shares = NULL; char *opt_socks_proxy = NULL; static const char def_conf[] = "bfgminer.conf"; static bool config_loaded; static int include_count; #define JSON_INCLUDE_CONF "include" #define JSON_LOAD_ERROR "JSON decode of file '%s' failed\n %s" #define JSON_LOAD_ERROR_LEN strlen(JSON_LOAD_ERROR) #define JSON_MAX_DEPTH 10 #define JSON_MAX_DEPTH_ERR "Too many levels of JSON includes (limit 10) or a loop" char *cmd_idle, *cmd_sick, *cmd_dead; #if defined(unix) || defined(__APPLE__) static char *opt_stderr_cmd = NULL; static int forkpid; #endif // defined(unix) #ifdef HAVE_CHROOT char *chroot_dir; #endif #ifdef HAVE_PWD_H char *opt_setuid; #endif struct sigaction termhandler, inthandler; struct thread_q *getq; static int total_work; static bool staged_full; struct work *staged_work = NULL; struct schedtime { bool enable; struct tm tm; }; struct schedtime schedstart; struct schedtime schedstop; bool sched_paused; static bool time_before(struct tm *tm1, struct tm *tm2) { if (tm1->tm_hour < tm2->tm_hour) return true; if (tm1->tm_hour == tm2->tm_hour && tm1->tm_min < tm2->tm_min) return true; return false; } static bool should_run(void) { struct tm tm; time_t tt; bool within_range; if (!schedstart.enable && !schedstop.enable) return true; tt = time(NULL); localtime_r(&tt, &tm); // NOTE: This is delicately balanced so that should_run is always false if schedstart==schedstop if (time_before(&schedstop.tm, &schedstart.tm)) within_range = (time_before(&tm, &schedstop.tm) || !time_before(&tm, &schedstart.tm)); else within_range = (time_before(&tm, &schedstop.tm) && !time_before(&tm, &schedstart.tm)); if (within_range && !schedstop.enable) /* This is a once off event with no stop time set */ schedstart.enable = false; return within_range; } void get_datestamp(char *f, size_t fsiz, time_t tt) { struct tm _tm; struct tm *tm = &_tm; if (tt == INVALID_TIMESTAMP) tt = time(NULL); localtime_r(&tt, tm); snprintf(f, fsiz, "[%d-%02d-%02d %02d:%02d:%02d]", tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); } static void get_timestamp(char *f, size_t fsiz, time_t tt) { struct tm _tm; struct tm *tm = &_tm; localtime_r(&tt, tm); snprintf(f, fsiz, "[%02d:%02d:%02d]", tm->tm_hour, tm->tm_min, tm->tm_sec); } static void applog_and_exit(const char *fmt, ...) FORMAT_SYNTAX_CHECK(printf, 1, 2); static char exit_buf[512]; static void applog_and_exit(const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); vsnprintf(exit_buf, sizeof(exit_buf), fmt, ap); va_end(ap); _applog(LOG_ERR, exit_buf); exit(1); } char *devpath_to_devid(const char *devpath) { #ifndef WIN32 struct stat my_stat; if (stat(devpath, &my_stat)) return NULL; char *devs = malloc(6 + (sizeof(dev_t) * 2) + 1); memcpy(devs, "dev_t:", 6); bin2hex(&devs[6], &my_stat.st_rdev, sizeof(dev_t)); #else if (!strncmp(devpath, "\\\\.\\", 4)) devpath += 4; if (strncasecmp(devpath, "COM", 3) || !devpath[3]) return NULL; devpath += 3; char *p; strtol(devpath, &p, 10); if (p[0]) return NULL; const int sz = (p - devpath); char *devs = malloc(4 + sz + 1); sprintf(devs, "com:%s", devpath); #endif return devs; } static bool devpaths_match(const char * const ap, const char * const bp) { char * const a = devpath_to_devid(ap); if (!a) return false; char * const b = devpath_to_devid(bp); bool rv = false; if (b) { rv = !strcmp(a, b); free(b); } free(a); return rv; } static int proc_letter_to_number(const char *s, const char ** const rem) { int n = 0, c; for ( ; s[0]; ++s) { if (unlikely(n > INT_MAX / 26)) break; c = tolower(s[0]) - 'a'; if (unlikely(c < 0 || c > 25)) break; if (unlikely(INT_MAX - c < n)) break; n = (n * 26) + c; } *rem = s; return n; } static bool cgpu_match(const char * const pattern, const struct cgpu_info * const cgpu) { // all - matches anything // d0 - matches all processors of device 0 // d0-3 - matches all processors of device 0, 1, 2, or 3 // d0a - matches first processor of device 0 // 0 - matches processor 0 // 0-4 - matches processors 0, 1, 2, 3, or 4 // ___ - matches all processors on all devices using driver/name ___ // ___0 - matches all processors of 0th device using driver/name ___ // ___0a - matches first processor of 0th device using driver/name ___ // @* - matches device with serial or path * // @*@a - matches first processor of device with serial or path * // ___@* - matches device with serial or path * using driver/name ___ if (!strcasecmp(pattern, "all")) return true; const struct device_drv * const drv = cgpu->drv; const char *p = pattern, *p2; size_t L; int n, i, c = -1; int n2; int proc_first = -1, proc_last = -1; struct cgpu_info *device; if (!(strncasecmp(drv->dname, p, (L = strlen(drv->dname))) && strncasecmp(drv-> name, p, (L = strlen(drv-> name))))) // dname or name p = &pattern[L]; else if (p[0] == 'd' && (isdigit(p[1]) || p[1] == '-')) // d# ++p; else if (isdigit(p[0]) || p[0] == '@' || p[0] == '-') // # or @ {} else return false; L = p - pattern; while (isspace(p[0])) ++p; if (p[0] == '@') { // Serial/path const char * const ser = &p[1]; for (p = ser; p[0] != '@' && p[0] != '\0'; ++p) {} p2 = (p[0] == '@') ? 
&p[1] : p; const size_t serlen = (p - ser); p = ""; n = n2 = 0; const char * const devpath = cgpu->device_path ?: ""; const char * const devser = cgpu->dev_serial ?: ""; if ((!strncmp(devpath, ser, serlen)) && devpath[serlen] == '\0') {} // Match else if ((!strncmp(devser, ser, serlen)) && devser[serlen] == '\0') {} // Match else { char devpath2[serlen + 1]; memcpy(devpath2, ser, serlen); devpath2[serlen] = '\0'; if (!devpaths_match(devpath, ser)) return false; } } else { if (isdigit(p[0])) n = strtol(p, (void*)&p2, 0); else { n = 0; p2 = p; } if (p2[0] == '-') { ++p2; if (p2[0] && isdigit(p2[0])) n2 = strtol(p2, (void*)&p2, 0); else n2 = INT_MAX; } else n2 = n; if (p == pattern) { if (!p[0]) return true; if (p2 && p2[0]) goto invsyntax; for (i = n; i <= n2; ++i) { if (i >= total_devices) break; if (cgpu == devices[i]) return true; } return false; } } if (p2[0]) { proc_first = proc_letter_to_number(&p2[0], &p2); if (p2[0] == '-') { ++p2; if (p2[0]) proc_last = proc_letter_to_number(p2, &p2); else proc_last = INT_MAX; } else proc_last = proc_first; if (p2[0]) goto invsyntax; } if (L > 1 || tolower(pattern[0]) != 'd' || !p[0]) { if ((L == 3 && !strncasecmp(pattern, drv->name, 3)) || (!L) || (L == strlen(drv->dname) && !strncasecmp(pattern, drv->dname, L))) {} // Matched name or dname else return false; if (p[0] && (cgpu->device_id < n || cgpu->device_id > n2)) return false; if (proc_first != -1 && (cgpu->proc_id < proc_first || cgpu->proc_id > proc_last)) return false; return true; } // d# c = -1; for (i = 0; ; ++i) { if (i == total_devices) return false; if (devices[i]->device != devices[i]) continue; ++c; if (c < n) continue; if (c > n2) break; for (device = devices[i]; device; device = device->next_proc) { if (proc_first != -1 && (device->proc_id < proc_first || device->proc_id > proc_last)) continue; if (device == cgpu) return true; } } return false; invsyntax: applog(LOG_WARNING, "%s: Invalid syntax: %s", __func__, pattern); return false; } #define TEST_CGPU_MATCH(pattern) \ if (!cgpu_match(pattern, &cgpu)) \ applog(LOG_ERR, "%s: Pattern \"%s\" should have matched!", __func__, pattern); \ // END TEST_CGPU_MATCH #define TEST_CGPU_NOMATCH(pattern) \ if (cgpu_match(pattern, &cgpu)) \ applog(LOG_ERR, "%s: Pattern \"%s\" should NOT have matched!", __func__, pattern); \ // END TEST_CGPU_MATCH static __maybe_unused void test_cgpu_match() { struct device_drv drv = { .dname = "test", .name = "TST", }; struct cgpu_info cgpu = { .drv = &drv, .device = &cgpu, .device_id = 1, .proc_id = 1, .proc_repr = "TST 1b", }, cgpu0a = { .drv = &drv, .device = &cgpu0a, .device_id = 0, .proc_id = 0, .proc_repr = "TST 0a", }, cgpu1a = { .drv = &drv, .device = &cgpu0a, .device_id = 1, .proc_id = 0, .proc_repr = "TST 1a", }; struct cgpu_info *devices_list[3] = {&cgpu0a, &cgpu1a, &cgpu,}; devices = devices_list; total_devices = 3; TEST_CGPU_MATCH("all") TEST_CGPU_MATCH("d1") TEST_CGPU_NOMATCH("d2") TEST_CGPU_MATCH("d0-5") TEST_CGPU_NOMATCH("d0-0") TEST_CGPU_NOMATCH("d2-5") TEST_CGPU_MATCH("d-1") TEST_CGPU_MATCH("d1-") TEST_CGPU_NOMATCH("d-0") TEST_CGPU_NOMATCH("d2-") TEST_CGPU_MATCH("2") TEST_CGPU_NOMATCH("3") TEST_CGPU_MATCH("1-2") TEST_CGPU_MATCH("2-3") TEST_CGPU_NOMATCH("1-1") TEST_CGPU_NOMATCH("3-4") TEST_CGPU_MATCH("TST") TEST_CGPU_MATCH("test") TEST_CGPU_MATCH("tst") TEST_CGPU_MATCH("TEST") TEST_CGPU_NOMATCH("TSF") TEST_CGPU_NOMATCH("TS") TEST_CGPU_NOMATCH("TSTF") TEST_CGPU_MATCH("TST1") TEST_CGPU_MATCH("test1") TEST_CGPU_MATCH("TST0-1") TEST_CGPU_MATCH("TST 1") TEST_CGPU_MATCH("TST 1-2") TEST_CGPU_MATCH("TEST 
1-2") TEST_CGPU_NOMATCH("TST2") TEST_CGPU_NOMATCH("TST2-3") TEST_CGPU_NOMATCH("TST0-0") TEST_CGPU_MATCH("TST1b") TEST_CGPU_MATCH("tst1b") TEST_CGPU_NOMATCH("TST1c") TEST_CGPU_NOMATCH("TST1bb") TEST_CGPU_MATCH("TST0-1b") TEST_CGPU_NOMATCH("TST0-1c") TEST_CGPU_MATCH("TST1a-d") TEST_CGPU_NOMATCH("TST1a-a") TEST_CGPU_NOMATCH("TST1-a") TEST_CGPU_NOMATCH("TST1c-z") TEST_CGPU_NOMATCH("TST1c-") TEST_CGPU_MATCH("@") TEST_CGPU_NOMATCH("@abc") TEST_CGPU_MATCH("@@b") TEST_CGPU_NOMATCH("@@c") TEST_CGPU_MATCH("TST@") TEST_CGPU_NOMATCH("TST@abc") TEST_CGPU_MATCH("TST@@b") TEST_CGPU_NOMATCH("TST@@c") TEST_CGPU_MATCH("TST@@b-f") TEST_CGPU_NOMATCH("TST@@c-f") TEST_CGPU_NOMATCH("TST@@-a") cgpu.device_path = "/dev/test"; cgpu.dev_serial = "testy"; TEST_CGPU_MATCH("TST@/dev/test") TEST_CGPU_MATCH("TST@testy") TEST_CGPU_NOMATCH("TST@") TEST_CGPU_NOMATCH("TST@/dev/test5@b") TEST_CGPU_NOMATCH("TST@testy3@b") TEST_CGPU_MATCH("TST@/dev/test@b") TEST_CGPU_MATCH("TST@testy@b") TEST_CGPU_NOMATCH("TST@/dev/test@c") TEST_CGPU_NOMATCH("TST@testy@c") cgpu.device_path = "usb:000:999"; TEST_CGPU_MATCH("TST@usb:000:999") drv.dname = "test7"; TEST_CGPU_MATCH("test7") TEST_CGPU_MATCH("TEST7") TEST_CGPU_NOMATCH("test&") TEST_CGPU_MATCH("test7 1-2") TEST_CGPU_MATCH("test7@testy@b") } static int cgpu_search(const char * const pattern, const int first) { int i; struct cgpu_info *cgpu; #define CHECK_CGPU_SEARCH do{ \ cgpu = get_devices(i); \ if (cgpu_match(pattern, cgpu)) \ return i; \ }while(0) for (i = first; i < total_devices; ++i) CHECK_CGPU_SEARCH; for (i = 0; i < first; ++i) CHECK_CGPU_SEARCH; #undef CHECK_CGPU_SEARCH return -1; } static pthread_mutex_t sharelog_lock; static FILE *sharelog_file = NULL; struct thr_info *get_thread(int thr_id) { struct thr_info *thr; rd_lock(&mining_thr_lock); thr = mining_thr[thr_id]; rd_unlock(&mining_thr_lock); return thr; } static struct cgpu_info *get_thr_cgpu(int thr_id) { struct thr_info *thr = get_thread(thr_id); return thr->cgpu; } struct cgpu_info *get_devices(int id) { struct cgpu_info *cgpu; rd_lock(&devices_lock); cgpu = devices[id]; rd_unlock(&devices_lock); return cgpu; } static pthread_mutex_t noncelog_lock = PTHREAD_MUTEX_INITIALIZER; static FILE *noncelog_file = NULL; static void noncelog(const struct work * const work) { const int thr_id = work->thr_id; const struct cgpu_info *proc = get_thr_cgpu(thr_id); char buf[0x200], hash[65], data[161], midstate[65]; int rv; size_t ret; bin2hex(hash, work->hash, 32); bin2hex(data, work->data, 80); bin2hex(midstate, work->midstate, 32); // timestamp,proc,hash,data,midstate rv = snprintf(buf, sizeof(buf), "%lu,%s,%s,%s,%s\n", (unsigned long)time(NULL), proc->proc_repr_ns, hash, data, midstate); if (unlikely(rv < 1)) { applog(LOG_ERR, "noncelog printf error"); return; } mutex_lock(&noncelog_lock); ret = fwrite(buf, rv, 1, noncelog_file); fflush(noncelog_file); mutex_unlock(&noncelog_lock); if (ret != 1) applog(LOG_ERR, "noncelog fwrite error"); } static void sharelog(const char*disposition, const struct work*work) { char target[(sizeof(work->target) * 2) + 1]; char hash[(sizeof(work->hash) * 2) + 1]; char data[(sizeof(work->data) * 2) + 1]; struct cgpu_info *cgpu; unsigned long int t; struct pool *pool; int thr_id, rv; char s[1024]; size_t ret; if (!sharelog_file) return; thr_id = work->thr_id; cgpu = get_thr_cgpu(thr_id); pool = work->pool; t = work->ts_getwork + timer_elapsed(&work->tv_getwork, &work->tv_work_found); bin2hex(target, work->target, sizeof(work->target)); bin2hex(hash, work->hash, sizeof(work->hash)); bin2hex(data, 
work->data, sizeof(work->data)); // timestamp,disposition,target,pool,dev,thr,sharehash,sharedata rv = snprintf(s, sizeof(s), "%lu,%s,%s,%s,%s,%u,%s,%s\n", t, disposition, target, pool->rpc_url, cgpu->proc_repr_ns, thr_id, hash, data); if (rv >= (int)(sizeof(s))) s[sizeof(s) - 1] = '\0'; else if (rv < 0) { applog(LOG_ERR, "sharelog printf error"); return; } mutex_lock(&sharelog_lock); ret = fwrite(s, rv, 1, sharelog_file); fflush(sharelog_file); mutex_unlock(&sharelog_lock); if (ret != 1) applog(LOG_ERR, "sharelog fwrite error"); } static char *getwork_req = "{\"method\": \"getwork\", \"params\": [], \"id\":0}\n"; /* Adjust all the pools' quota to the greatest common denominator after a pool * has been added or the quotas changed. */ void adjust_quota_gcd(void) { unsigned long gcd, lowest_quota = ~0UL, quota; struct pool *pool; int i; for (i = 0; i < total_pools; i++) { pool = pools[i]; quota = pool->quota; if (!quota) continue; if (quota < lowest_quota) lowest_quota = quota; } if (likely(lowest_quota < ~0UL)) { gcd = lowest_quota; for (i = 0; i < total_pools; i++) { pool = pools[i]; quota = pool->quota; if (!quota) continue; while (quota % gcd) gcd--; } } else gcd = 1; for (i = 0; i < total_pools; i++) { pool = pools[i]; pool->quota_used *= global_quota_gcd; pool->quota_used /= gcd; pool->quota_gcd = pool->quota / gcd; } global_quota_gcd = gcd; applog(LOG_DEBUG, "Global quota greatest common denominator set to %lu", gcd); } /* Return value is ignored if not called from add_pool_details */ struct pool *add_pool(void) { struct pool *pool; pool = calloc(sizeof(struct pool), 1); if (!pool) quit(1, "Failed to malloc pool in add_pool"); pool->pool_no = pool->prio = total_pools; mutex_init(&pool->last_work_lock); mutex_init(&pool->pool_lock); if (unlikely(pthread_cond_init(&pool->cr_cond, NULL))) quit(1, "Failed to pthread_cond_init in add_pool"); cglock_init(&pool->data_lock); mutex_init(&pool->stratum_lock); timer_unset(&pool->swork.tv_transparency); /* Make sure the pool doesn't think we've been idle since time 0 */ pool->tv_idle.tv_sec = ~0UL; cgtime(&pool->cgminer_stats.start_tv); pool->rpc_proxy = NULL; pool->quota = 1; adjust_quota_gcd(); pool->sock = INVSOCK; pool->lp_socket = CURL_SOCKET_BAD; pools = realloc(pools, sizeof(struct pool *) * (total_pools + 2)); pools[total_pools++] = pool; return pool; } /* Pool variant of test and set */ static bool pool_tset(struct pool *pool, bool *var) { bool ret; mutex_lock(&pool->pool_lock); ret = *var; *var = true; mutex_unlock(&pool->pool_lock); return ret; } bool pool_tclear(struct pool *pool, bool *var) { bool ret; mutex_lock(&pool->pool_lock); ret = *var; *var = false; mutex_unlock(&pool->pool_lock); return ret; } struct pool *current_pool(void) { struct pool *pool; cg_rlock(&control_lock); pool = currentpool; cg_runlock(&control_lock); return pool; } char *set_int_range(const char *arg, int *i, int min, int max) { char *err = opt_set_intval(arg, i); if (err) return err; if (*i < min || *i > max) return "Value out of range"; return NULL; } static char *set_int_0_to_9999(const char *arg, int *i) { return set_int_range(arg, i, 0, 9999); } static char *set_int_1_to_65535(const char *arg, int *i) { return set_int_range(arg, i, 1, 65535); } static char *set_int_0_to_10(const char *arg, int *i) { return set_int_range(arg, i, 0, 10); } static char *set_int_1_to_10(const char *arg, int *i) { return set_int_range(arg, i, 1, 10); } char *set_strdup(const char *arg, char **p) { *p = strdup((char *)arg); return NULL; } #if BLKMAKER_VERSION > 1 static char 
*set_b58addr(const char *arg, struct _cbscript_t *p) { size_t scriptsz = blkmk_address_to_script(NULL, 0, arg); if (!scriptsz) return "Invalid address"; char *script = malloc(scriptsz); if (blkmk_address_to_script(script, scriptsz, arg) != scriptsz) { free(script); return "Failed to convert address to script"; } p->data = script; p->sz = scriptsz; return NULL; } #endif static void bdiff_target_leadzero(unsigned char *target, double diff); char *set_request_diff(const char *arg, float *p) { unsigned char target[32]; char *e = opt_set_floatval(arg, p); if (e) return e; request_bdiff = (double)*p * 0.9999847412109375; bdiff_target_leadzero(target, request_bdiff); request_target_str = malloc(65); bin2hex(request_target_str, target, 32); return NULL; } #ifdef NEED_BFG_LOWL_VCOM extern struct lowlevel_device_info *_vcom_devinfo_findorcreate(struct lowlevel_device_info **, const char *); #ifdef WIN32 void _vcom_devinfo_scan_querydosdevice(struct lowlevel_device_info ** const devinfo_list) { char dev[PATH_MAX]; char *devp = dev; size_t bufLen = 0x100; tryagain: ; char buf[bufLen]; if (!QueryDosDevice(NULL, buf, bufLen)) { if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { bufLen *= 2; applog(LOG_DEBUG, "QueryDosDevice returned insufficent buffer error; enlarging to %lx", (unsigned long)bufLen); goto tryagain; } applogr(, LOG_WARNING, "Error occurred trying to enumerate COM ports with QueryDosDevice"); } size_t tLen; memcpy(devp, "\\\\.\\", 4); devp = &devp[4]; for (char *t = buf; *t; t += tLen) { tLen = strlen(t) + 1; if (strncmp("COM", t, 3)) continue; memcpy(devp, t, tLen); // NOTE: We depend on _vcom_devinfo_findorcreate to further check that there's a number (and only a number) on the end _vcom_devinfo_findorcreate(devinfo_list, dev); } } #else void _vcom_devinfo_scan_lsdev(struct lowlevel_device_info ** const devinfo_list) { char dev[PATH_MAX]; char *devp = dev; DIR *D; struct dirent *de; const char devdir[] = "/dev"; const size_t devdirlen = sizeof(devdir) - 1; char *devpath = devp; char *devfile = devpath + devdirlen + 1; D = opendir(devdir); if (!D) applogr(, LOG_DEBUG, "No /dev directory to look for VCOM devices in"); memcpy(devpath, devdir, devdirlen); devpath[devdirlen] = '/'; while ( (de = readdir(D)) ) { if (!strncmp(de->d_name, "cu.", 3) //don't probe Bluetooth devices - causes bus errors and segfaults && strncmp(de->d_name, "cu.Bluetooth", 12)) goto trydev; if (strncmp(de->d_name, "tty", 3)) continue; if (strncmp(&de->d_name[3], "USB", 3) && strncmp(&de->d_name[3], "ACM", 3)) continue; trydev: strcpy(devfile, de->d_name); _vcom_devinfo_findorcreate(devinfo_list, dev); } closedir(D); } #endif #endif static char *add_serial(const char *arg) { string_elist_add(arg, &scan_devices); return NULL; } static char *opt_string_elist_add(const char *arg, struct string_elist **elist) { string_elist_add(arg, elist); return NULL; } bool get_intrange(const char *arg, int *val1, int *val2) { // NOTE: This could be done with sscanf, but its %n is broken in strange ways on Windows char *p, *p2; *val1 = strtol(arg, &p, 0); if (arg == p) // Zero-length ending number, invalid return false; while (true) { if (!p[0]) { *val2 = *val1; return true; } if (p[0] == '-') break; if (!isspace(p[0])) // Garbage, invalid return false; ++p; } p2 = &p[1]; *val2 = strtol(p2, &p, 0); if (p2 == p) // Zero-length ending number, invalid return false; while (true) { if (!p[0]) return true; if (!isspace(p[0])) // Garbage, invalid return false; ++p; } } static void _test_intrange(const char *s, const int v[2]) { int a[2]; if 
(!get_intrange(s, &a[0], &a[1])) applog(LOG_ERR, "Test \"%s\" failed: returned false", s); for (int i = 0; i < 2; ++i) if (unlikely(a[i] != v[i])) applog(LOG_ERR, "Test \"%s\" failed: value %d should be %d but got %d", s, i, v[i], a[i]); } #define _test_intrange(s, ...) _test_intrange(s, (int[]){ __VA_ARGS__ }) static void _test_intrange_fail(const char *s) { int a[2]; if (get_intrange(s, &a[0], &a[1])) applog(LOG_ERR, "Test !\"%s\" failed: returned true with %d and %d", s, a[0], a[1]); } static void test_intrange() { _test_intrange("-1--2", -1, -2); _test_intrange("-1-2", -1, 2); _test_intrange("1--2", 1, -2); _test_intrange("1-2", 1, 2); _test_intrange("111-222", 111, 222); _test_intrange(" 11 - 22 ", 11, 22); _test_intrange("+11-+22", 11, 22); _test_intrange("-1", -1, -1); _test_intrange_fail("all"); _test_intrange_fail("1-"); _test_intrange_fail(""); _test_intrange_fail("1-54x"); } static char *set_devices(char *arg) { if (*arg) { if (*arg == '?') { opt_display_devs = true; return NULL; } } else return "Invalid device parameters"; string_elist_add(arg, &opt_devices_enabled_list); return NULL; } static char *set_balance(enum pool_strategy *strategy) { *strategy = POOL_BALANCE; return NULL; } static char *set_loadbalance(enum pool_strategy *strategy) { *strategy = POOL_LOADBALANCE; return NULL; } static char *set_rotate(const char *arg, int *i) { pool_strategy = POOL_ROTATE; return set_int_range(arg, i, 0, 9999); } static char *set_rr(enum pool_strategy *strategy) { *strategy = POOL_ROUNDROBIN; return NULL; } /* Detect that url is for a stratum protocol either via the presence of * stratum+tcp or by detecting a stratum server response */ bool detect_stratum(struct pool *pool, char *url) { if (!extract_sockaddr(url, &pool->sockaddr_url, &pool->stratum_port)) return false; if (!strncasecmp(url, "stratum+tcp://", 14)) { pool->rpc_url = strdup(url); pool->has_stratum = true; pool->stratum_url = pool->sockaddr_url; return true; } return false; } static struct pool *add_url(void) { total_urls++; if (total_urls > total_pools) add_pool(); return pools[total_urls - 1]; } static void setup_url(struct pool *pool, char *arg) { if (detect_stratum(pool, arg)) return; opt_set_charp(arg, &pool->rpc_url); if (strncmp(arg, "http://", 7) && strncmp(arg, "https://", 8)) { const size_t L = strlen(arg); char *httpinput; httpinput = malloc(8 + L); if (!httpinput) quit(1, "Failed to malloc httpinput"); sprintf(httpinput, "http://%s", arg); pool->rpc_url = httpinput; } } static char *set_url(char *arg) { struct pool *pool = add_url(); setup_url(pool, arg); return NULL; } static char *set_quota(char *arg) { char *semicolon = strchr(arg, ';'), *url; int len, qlen, quota; struct pool *pool; if (!semicolon) return "No semicolon separated quota;URL pair found"; len = strlen(arg); *semicolon = '\0'; qlen = strlen(arg); if (!qlen) return "No parameter for quota found"; len -= qlen + 1; if (len < 1) return "No parameter for URL found"; quota = atoi(arg); if (quota < 0) return "Invalid negative parameter for quota set"; url = arg + qlen + 1; pool = add_url(); setup_url(pool, url); pool->quota = quota; applog(LOG_INFO, "Setting pool %d to quota %d", pool->pool_no, pool->quota); adjust_quota_gcd(); return NULL; } static char *set_user(const char *arg) { struct pool *pool; total_users++; if (total_users > total_pools) add_pool(); pool = pools[total_users - 1]; opt_set_charp(arg, &pool->rpc_user); return NULL; } static char *set_pass(const char *arg) { struct pool *pool; total_passes++; if (total_passes > total_pools) 
add_pool(); pool = pools[total_passes - 1]; opt_set_charp(arg, &pool->rpc_pass); return NULL; } static char *set_userpass(const char *arg) { struct pool *pool; char *updup; if (total_users != total_passes) return "User + pass options must be balanced before userpass"; ++total_users; ++total_passes; if (total_users > total_pools) add_pool(); pool = pools[total_users - 1]; updup = strdup(arg); opt_set_charp(arg, &pool->rpc_userpass); pool->rpc_user = strtok(updup, ":"); if (!pool->rpc_user) return "Failed to find : delimited user info"; pool->rpc_pass = strtok(NULL, ":"); if (!pool->rpc_pass) pool->rpc_pass = ""; return NULL; } static char *set_pool_priority(const char *arg) { struct pool *pool; if (!total_pools) return "Usage of --pool-priority before pools are defined does not make sense"; pool = pools[total_pools - 1]; opt_set_intval(arg, &pool->prio); return NULL; } static char *set_pool_proxy(const char *arg) { struct pool *pool; if (!total_pools) return "Usage of --pool-proxy before pools are defined does not make sense"; if (!our_curl_supports_proxy_uris()) return "Your installed cURL library does not support proxy URIs. At least version 7.21.7 is required."; pool = pools[total_pools - 1]; opt_set_charp(arg, &pool->rpc_proxy); return NULL; } static char *set_pool_force_rollntime(const char *arg) { struct pool *pool; if (!total_pools) return "Usage of --force-rollntime before pools are defined does not make sense"; pool = pools[total_pools - 1]; opt_set_intval(arg, &pool->force_rollntime); return NULL; } static char *enable_debug(bool *flag) { *flag = true; opt_debug_console = true; /* Turn on verbose output, too. */ opt_log_output = true; return NULL; } static char *set_schedtime(const char *arg, struct schedtime *st) { if (sscanf(arg, "%d:%d", &st->tm.tm_hour, &st->tm.tm_min) != 2) { if (strcasecmp(arg, "now")) return "Invalid time set, should be HH:MM"; } else schedstop.tm.tm_sec = 0; if (st->tm.tm_hour > 23 || st->tm.tm_min > 59 || st->tm.tm_hour < 0 || st->tm.tm_min < 0) return "Invalid time set."; st->enable = true; return NULL; } static char *set_log_file(char *arg) { char *r = ""; long int i = strtol(arg, &r, 10); int fd, stderr_fd = fileno(stderr); if ((!*r) && i >= 0 && i <= INT_MAX) fd = i; else if (!strcmp(arg, "-")) { fd = fileno(stdout); if (unlikely(fd == -1)) return "Standard output missing for log-file"; } else { fd = open(arg, O_WRONLY | O_APPEND | O_CREAT, S_IRUSR | S_IWUSR); if (unlikely(fd == -1)) return "Failed to open log-file"; } close(stderr_fd); if (unlikely(-1 == dup2(fd, stderr_fd))) return "Failed to dup2 for log-file"; close(fd); return NULL; } static char *_bfgopt_set_file(const char *arg, FILE **F, const char *mode, const char *purpose) { char *r = ""; long int i = strtol(arg, &r, 10); static char *err = NULL; const size_t errbufsz = 0x100; free(err); err = NULL; if ((!*r) && i >= 0 && i <= INT_MAX) { *F = fdopen((int)i, mode); if (!*F) { err = malloc(errbufsz); snprintf(err, errbufsz, "Failed to open fd %d for %s", (int)i, purpose); return err; } } else if (!strcmp(arg, "-")) { *F = (mode[0] == 'a') ? stdout : stdin; if (!*F) { err = malloc(errbufsz); snprintf(err, errbufsz, "Standard %sput missing for %s", (mode[0] == 'a') ? 
"out" : "in", purpose); return err; } } else { *F = fopen(arg, mode); if (!*F) { err = malloc(errbufsz); snprintf(err, errbufsz, "Failed to open %s for %s", arg, purpose); return err; } } return NULL; } static char *set_noncelog(char *arg) { return _bfgopt_set_file(arg, &noncelog_file, "a", "nonce log"); } static char *set_sharelog(char *arg) { return _bfgopt_set_file(arg, &sharelog_file, "a", "share log"); } static char *temp_cutoff_str = ""; static char *temp_target_str = ""; char *set_temp_cutoff(char *arg) { int val; if (!(arg && arg[0])) return "Invalid parameters for set temp cutoff"; val = atoi(arg); if (val < 0 || val > 200) return "Invalid value passed to set temp cutoff"; temp_cutoff_str = arg; return NULL; } char *set_temp_target(char *arg) { int val; if (!(arg && arg[0])) return "Invalid parameters for set temp target"; val = atoi(arg); if (val < 0 || val > 200) return "Invalid value passed to set temp target"; temp_target_str = arg; return NULL; } // For a single element string, this always returns the number (for all calls) // For multi-element strings, it returns each element as a number in order, and 0 when there are no more static int temp_strtok(char *base, char **n) { char *i = *n; char *p = strchr(i, ','); if (p) { p[0] = '\0'; *n = &p[1]; } else if (base != i) *n = strchr(i, '\0'); return atoi(i); } static void load_temp_config_cgpu(struct cgpu_info *cgpu, char **cutoff_np, char **target_np) { int target_off, val; // cutoff default may be specified by driver during probe; otherwise, opt_cutofftemp (const) if (!cgpu->cutofftemp) cgpu->cutofftemp = opt_cutofftemp; // target default may be specified by driver, and is moved with offset; otherwise, offset minus 6 if (cgpu->targettemp) target_off = cgpu->targettemp - cgpu->cutofftemp; else target_off = -6; cgpu->cutofftemp_default = cgpu->cutofftemp; val = temp_strtok(temp_cutoff_str, cutoff_np); if (val < 0 || val > 200) quit(1, "Invalid value passed to set temp cutoff"); if (val) cgpu->cutofftemp = val; cgpu->targettemp_default = cgpu->cutofftemp + target_off; val = temp_strtok(temp_target_str, target_np); if (val < 0 || val > 200) quit(1, "Invalid value passed to set temp target"); if (val) cgpu->targettemp = val; else cgpu->targettemp = cgpu->cutofftemp + target_off; applog(LOG_DEBUG, "%"PRIprepr": Set temperature config: target=%d cutoff=%d", cgpu->proc_repr, cgpu->targettemp, cgpu->cutofftemp); } static void load_temp_config() { int i; char *cutoff_n, *target_n; struct cgpu_info *cgpu; cutoff_n = temp_cutoff_str; target_n = temp_target_str; for (i = 0; i < total_devices; ++i) { cgpu = get_devices(i); load_temp_config_cgpu(cgpu, &cutoff_n, &target_n); } if (cutoff_n != temp_cutoff_str && cutoff_n[0]) quit(1, "Too many values passed to set temp cutoff"); if (target_n != temp_target_str && target_n[0]) quit(1, "Too many values passed to set temp target"); } static char *set_api_allow(const char *arg) { opt_set_charp(arg, &opt_api_allow); return NULL; } static char *set_api_groups(const char *arg) { opt_set_charp(arg, &opt_api_groups); return NULL; } static char *set_api_description(const char *arg) { opt_set_charp(arg, &opt_api_description); return NULL; } static char *set_api_mcast_des(const char *arg) { opt_set_charp(arg, &opt_api_mcast_des); return NULL; } #ifdef USE_ICARUS static char *set_icarus_options(const char *arg) { opt_set_charp(arg, &opt_icarus_options); return NULL; } static char *set_icarus_timing(const char *arg) { opt_set_charp(arg, &opt_icarus_timing); return NULL; } #endif #ifdef USE_AVALON static char 
*set_avalon_options(const char *arg) { opt_set_charp(arg, &opt_avalon_options); return NULL; } #endif #ifdef USE_KLONDIKE static char *set_klondike_options(const char *arg) { opt_set_charp(arg, &opt_klondike_options); return NULL; } #endif __maybe_unused static char *set_null(const char __maybe_unused *arg) { return NULL; } /* These options are available from config file or commandline */ static struct opt_table opt_config_table[] = { #ifdef WANT_CPUMINE OPT_WITH_ARG("--algo|-a", set_algo, show_algo, &opt_algo, "Specify sha256 implementation for CPU mining:\n" "\tfastauto*\tQuick benchmark at startup to pick a working algorithm\n" "\tauto\t\tBenchmark at startup and pick fastest algorithm" "\n\tc\t\tLinux kernel sha256, implemented in C" #ifdef WANT_SSE2_4WAY "\n\t4way\t\ttcatm's 4-way SSE2 implementation" #endif #ifdef WANT_VIA_PADLOCK "\n\tvia\t\tVIA padlock implementation" #endif "\n\tcryptopp\tCrypto++ C/C++ implementation" #ifdef WANT_CRYPTOPP_ASM32 "\n\tcryptopp_asm32\tCrypto++ 32-bit assembler implementation" #endif #ifdef WANT_X8632_SSE2 "\n\tsse2_32\t\tSSE2 32 bit implementation for i386 machines" #endif #ifdef WANT_X8664_SSE2 "\n\tsse2_64\t\tSSE2 64 bit implementation for x86_64 machines" #endif #ifdef WANT_X8664_SSE4 "\n\tsse4_64\t\tSSE4.1 64 bit implementation for x86_64 machines" #endif #ifdef WANT_ALTIVEC_4WAY "\n\taltivec_4way\tAltivec implementation for PowerPC G4 and G5 machines" #endif ), #endif OPT_WITH_ARG("--api-allow", set_api_allow, NULL, NULL, "Allow API access only to the given list of [G:]IP[/Prefix] addresses[/subnets]"), OPT_WITH_ARG("--api-description", set_api_description, NULL, NULL, "Description placed in the API status header, default: BFGMiner version"), OPT_WITH_ARG("--api-groups", set_api_groups, NULL, NULL, "API one letter groups G:cmd:cmd[,P:cmd:*...] 
defining the cmds a groups can use"), OPT_WITHOUT_ARG("--api-listen", opt_set_bool, &opt_api_listen, "Enable API, default: disabled"), OPT_WITHOUT_ARG("--api-mcast", opt_set_bool, &opt_api_mcast, "Enable API Multicast listener, default: disabled"), OPT_WITH_ARG("--api-mcast-addr", opt_set_charp, opt_show_charp, &opt_api_mcast_addr, "API Multicast listen address"), OPT_WITH_ARG("--api-mcast-code", opt_set_charp, opt_show_charp, &opt_api_mcast_code, "Code expected in the API Multicast message, don't use '-'"), OPT_WITH_ARG("--api-mcast-des", set_api_mcast_des, NULL, NULL, "Description appended to the API Multicast reply, default: ''"), OPT_WITH_ARG("--api-mcast-port", set_int_1_to_65535, opt_show_intval, &opt_api_mcast_port, "API Multicast listen port"), OPT_WITHOUT_ARG("--api-network", opt_set_bool, &opt_api_network, "Allow API (if enabled) to listen on/for any address, default: only 127.0.0.1"), OPT_WITH_ARG("--api-port", set_int_1_to_65535, opt_show_intval, &opt_api_port, "Port number of miner API"), #ifdef HAVE_ADL OPT_WITHOUT_ARG("--auto-fan", opt_set_bool, &opt_autofan, "Automatically adjust all GPU fan speeds to maintain a target temperature"), OPT_WITHOUT_ARG("--auto-gpu", opt_set_bool, &opt_autoengine, "Automatically adjust all GPU engine clock speeds to maintain a target temperature"), #endif OPT_WITHOUT_ARG("--balance", set_balance, &pool_strategy, "Change multipool strategy from failover to even share balance"), OPT_WITHOUT_ARG("--benchmark", opt_set_bool, &opt_benchmark, "Run BFGMiner in benchmark mode - produces no shares"), #if defined(USE_BITFORCE) OPT_WITHOUT_ARG("--bfl-range", opt_set_bool, &opt_bfl_noncerange, "Use nonce range on bitforce devices if supported"), #endif #ifdef WANT_CPUMINE OPT_WITH_ARG("--bench-algo|-b", set_int_0_to_9999, opt_show_intval, &opt_bench_algo, opt_hidden), #endif #ifdef HAVE_CHROOT OPT_WITH_ARG("--chroot-dir", opt_set_charp, NULL, &chroot_dir, "Chroot to a directory right after startup"), #endif OPT_WITH_ARG("--cmd-idle", opt_set_charp, NULL, &cmd_idle, "Execute a command when a device is allowed to be idle (rest or wait)"), OPT_WITH_ARG("--cmd-sick", opt_set_charp, NULL, &cmd_sick, "Execute a command when a device is declared sick"), OPT_WITH_ARG("--cmd-dead", opt_set_charp, NULL, &cmd_dead, "Execute a command when a device is declared dead"), #if BLKMAKER_VERSION > 1 OPT_WITH_ARG("--coinbase-addr", set_b58addr, NULL, &opt_coinbase_script, "Set coinbase payout address for solo mining"), OPT_WITH_ARG("--coinbase-address|--coinbase-payout|--cbaddress|--cbaddr|--cb-address|--cb-addr|--payout", set_b58addr, NULL, &opt_coinbase_script, opt_hidden), #endif #if BLKMAKER_VERSION > 0 OPT_WITH_ARG("--coinbase-sig", set_strdup, NULL, &opt_coinbase_sig, "Set coinbase signature when possible"), OPT_WITH_ARG("--coinbase|--cbsig|--cb-sig|--cb|--prayer", set_strdup, NULL, &opt_coinbase_sig, opt_hidden), #endif #ifdef HAVE_CURSES OPT_WITHOUT_ARG("--compact", opt_set_bool, &opt_compact, "Use compact display without per device statistics"), #endif #ifdef WANT_CPUMINE OPT_WITH_ARG("--cpu-threads|-t", force_nthreads_int, opt_show_intval, &opt_n_threads, "Number of miner CPU threads"), #endif OPT_WITHOUT_ARG("--debug|-D", enable_debug, &opt_debug, "Enable debug output"), OPT_WITHOUT_ARG("--debuglog", opt_set_bool, &opt_debug, "Enable debug logging"), OPT_WITHOUT_ARG("--device-protocol-dump", opt_set_bool, &opt_dev_protocol, "Verbose dump of device protocol-level activities"), OPT_WITH_ARG("--device|-d", set_devices, NULL, NULL, "Enable only devices matching pattern 
(default: all)"), OPT_WITHOUT_ARG("--disable-rejecting", opt_set_bool, &opt_disable_pool, "Automatically disable pools that continually reject shares"), #ifdef USE_LIBMICROHTTPD OPT_WITH_ARG("--http-port", opt_set_intval, opt_show_intval, &httpsrv_port, "Port number to listen on for HTTP getwork miners (-1 means disabled)"), #endif #if defined(WANT_CPUMINE) && (defined(HAVE_OPENCL) || defined(USE_FPGA)) OPT_WITHOUT_ARG("--enable-cpu|-C", opt_set_bool, &opt_usecpu, opt_hidden), #endif OPT_WITH_ARG("--expiry|-E", set_int_0_to_9999, opt_show_intval, &opt_expiry, "Upper bound on how many seconds after getting work we consider a share from it stale (w/o longpoll active)"), OPT_WITH_ARG("--expiry-lp", set_int_0_to_9999, opt_show_intval, &opt_expiry_lp, "Upper bound on how many seconds after getting work we consider a share from it stale (with longpoll active)"), OPT_WITHOUT_ARG("--failover-only", opt_set_bool, &opt_fail_only, "Don't leak work to backup pools when primary pool is lagging"), #ifdef USE_FPGA OPT_WITHOUT_ARG("--force-dev-init", opt_set_bool, &opt_force_dev_init, "Always initialize devices when possible (such as bitstream uploads to some FPGAs)"), #endif #ifdef HAVE_OPENCL OPT_WITH_ARG("--gpu-dyninterval", set_int_1_to_65535, opt_show_intval, &opt_dynamic_interval, "Set the refresh interval in ms for GPUs using dynamic intensity"), OPT_WITH_ARG("--gpu-platform", set_int_0_to_9999, opt_show_intval, &opt_platform_id, "Select OpenCL platform ID to use for GPU mining"), OPT_WITH_ARG("--gpu-threads|-g", set_int_1_to_10, opt_show_intval, &opt_g_threads, "Number of threads per GPU (1 - 10)"), #ifdef HAVE_ADL OPT_WITH_ARG("--gpu-engine", set_gpu_engine, NULL, NULL, "GPU engine (over)clock range in MHz - one value, range and/or comma separated list (e.g. 850-900,900,750-850)"), OPT_WITH_ARG("--gpu-fan", set_gpu_fan, NULL, NULL, "GPU fan percentage range - one value, range and/or comma separated list (e.g. 0-85,85,65)"), OPT_WITH_ARG("--gpu-map", set_gpu_map, NULL, NULL, "Map OpenCL to ADL device order manually, paired CSV (e.g. 
1:0,2:1 maps OpenCL 1 to ADL 0, 2 to 1)"), OPT_WITH_ARG("--gpu-memclock", set_gpu_memclock, NULL, NULL, "Set the GPU memory (over)clock in MHz - one value for all or separate by commas for per card"), OPT_WITH_ARG("--gpu-memdiff", set_gpu_memdiff, NULL, NULL, "Set a fixed difference in clock speed between the GPU and memory in auto-gpu mode"), OPT_WITH_ARG("--gpu-powertune", set_gpu_powertune, NULL, NULL, "Set the GPU powertune percentage - one value for all or separate by commas for per card"), OPT_WITHOUT_ARG("--gpu-reorder", opt_set_bool, &opt_reorder, "Attempt to reorder GPU devices according to PCI Bus ID"), OPT_WITH_ARG("--gpu-vddc", set_gpu_vddc, NULL, NULL, "Set the GPU voltage in Volts - one value for all or separate by commas for per card"), #endif #ifdef USE_SCRYPT OPT_WITH_ARG("--lookup-gap", set_lookup_gap, NULL, NULL, "Set GPU lookup gap for scrypt mining, comma separated"), OPT_WITH_ARG("--intensity|-I", set_intensity, NULL, NULL, "Intensity of GPU scanning (d or " MIN_SHA_INTENSITY_STR " -> " MAX_SCRYPT_INTENSITY_STR ",default: d to maintain desktop interactivity)"), #else OPT_WITH_ARG("--intensity|-I", set_intensity, NULL, NULL, "Intensity of GPU scanning (d or " MIN_SHA_INTENSITY_STR " -> " MAX_SHA_INTENSITY_STR ",default: d to maintain desktop interactivity)"), #endif #endif #if defined(HAVE_OPENCL) || defined(USE_MODMINER) || defined(USE_X6500) || defined(USE_ZTEX) OPT_WITH_ARG("--kernel-path|-K", opt_set_charp, opt_show_charp, &opt_kernel_path, "Specify a path to where bitstream and kernel files are"), #endif #ifdef HAVE_OPENCL OPT_WITH_ARG("--kernel|-k", set_kernel, NULL, NULL, "Override sha256 kernel to use (diablo, poclbm, phatk or diakgcn) - one value or comma separated"), #endif #ifdef USE_ICARUS OPT_WITH_ARG("--icarus-options", set_icarus_options, NULL, NULL, opt_hidden), OPT_WITH_ARG("--icarus-timing", set_icarus_timing, NULL, NULL, opt_hidden), #endif #ifdef USE_AVALON OPT_WITH_ARG("--avalon-options", set_avalon_options, NULL, NULL, opt_hidden), #endif #ifdef USE_KLONDIKE OPT_WITH_ARG("--klondike-options", set_klondike_options, NULL, NULL, "Set klondike options clock:temptarget"), #endif OPT_WITHOUT_ARG("--load-balance", set_loadbalance, &pool_strategy, "Change multipool strategy from failover to quota based balance"), OPT_WITH_ARG("--log|-l", set_int_0_to_9999, opt_show_intval, &opt_log_interval, "Interval in seconds between log output"), OPT_WITH_ARG("--log-file|-L", set_log_file, NULL, NULL, "Append log file for output messages"), OPT_WITH_ARG("--logfile", set_log_file, NULL, NULL, opt_hidden), OPT_WITHOUT_ARG("--log-microseconds", opt_set_bool, &opt_log_microseconds, "Include microseconds in log output"), #if defined(unix) || defined(__APPLE__) OPT_WITH_ARG("--monitor|-m", opt_set_charp, NULL, &opt_stderr_cmd, "Use custom pipe cmd for output messages"), #endif // defined(unix) OPT_WITHOUT_ARG("--net-delay", opt_set_bool, &opt_delaynet, "Impose small delays in networking to avoid overloading slow routers"), OPT_WITHOUT_ARG("--no-adl", opt_set_bool, &opt_noadl, #ifdef HAVE_ADL "Disable the ATI display library used for monitoring and setting GPU parameters" #else opt_hidden #endif ), OPT_WITHOUT_ARG("--no-gbt", opt_set_invbool, &want_gbt, "Disable getblocktemplate support"), OPT_WITHOUT_ARG("--no-getwork", opt_set_invbool, &want_getwork, "Disable getwork support"), OPT_WITHOUT_ARG("--no-longpoll", opt_set_invbool, &want_longpoll, "Disable X-Long-Polling support"), OPT_WITHOUT_ARG("--no-pool-disable", opt_set_invbool, &opt_disable_pool, opt_hidden), 
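/* A sketch of typical --device/-d patterns, using the example names from
 * cgpu_match()'s documentation and tests above ("TST" and "/dev/test" are
 * placeholders from those tests, not real hardware); see set_devices() and
 * cgpu_match() for the full syntax:
 *
 *   bfgminer -d all             # every detected processor (the default)
 *   bfgminer -d d0              # all processors of device 0
 *   bfgminer -d 0-4             # processors 0 through 4
 *   bfgminer -d TST1b           # second processor of TST device 1
 *   bfgminer -d TST@/dev/test   # TST device with a given path or serial
 */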
OPT_WITHOUT_ARG("--no-restart", opt_set_invbool, &opt_restart, "Do not attempt to restart devices that hang" ), OPT_WITHOUT_ARG("--no-show-processors", opt_set_invbool, &opt_show_procs, opt_hidden), OPT_WITHOUT_ARG("--no-show-procs", opt_set_invbool, &opt_show_procs, opt_hidden), OPT_WITHOUT_ARG("--no-stratum", opt_set_invbool, &want_stratum, "Disable Stratum detection"), OPT_WITHOUT_ARG("--no-submit-stale", opt_set_invbool, &opt_submit_stale, "Don't submit shares if they are detected as stale"), #ifdef HAVE_OPENCL OPT_WITHOUT_ARG("--no-opencl-binaries", opt_set_invbool, &opt_opencl_binaries, "Don't attempt to use or save OpenCL kernel binaries"), #endif OPT_WITHOUT_ARG("--no-unicode", #ifdef USE_UNICODE opt_set_invbool, &use_unicode, "Don't use Unicode characters in TUI" #else set_null, &use_unicode, opt_hidden #endif ), OPT_WITH_ARG("--noncelog", set_noncelog, NULL, NULL, "Create log of all nonces found"), OPT_WITH_ARG("--pass|-p", set_pass, NULL, NULL, "Password for bitcoin JSON-RPC server"), OPT_WITHOUT_ARG("--per-device-stats", opt_set_bool, &want_per_device_stats, "Force verbose mode and output per-device statistics"), OPT_WITH_ARG("--userpass|-O", // duplicate to ensure config loads it before pool-priority set_userpass, NULL, NULL, opt_hidden), OPT_WITH_ARG("--pool-priority", set_pool_priority, NULL, NULL, "Priority for just the previous-defined pool"), OPT_WITH_ARG("--pool-proxy|-x", set_pool_proxy, NULL, NULL, "Proxy URI to use for connecting to just the previous-defined pool"), OPT_WITH_ARG("--force-rollntime", // NOTE: must be after --pass for config file ordering set_pool_force_rollntime, NULL, NULL, opt_hidden), OPT_WITHOUT_ARG("--protocol-dump|-P", opt_set_bool, &opt_protocol, "Verbose dump of protocol-level activities"), OPT_WITH_ARG("--queue|-Q", set_int_0_to_9999, opt_show_intval, &opt_queue, "Minimum number of work items to have queued (0+)"), OPT_WITHOUT_ARG("--quiet|-q", opt_set_bool, &opt_quiet, "Disable logging output, display status and errors"), OPT_WITHOUT_ARG("--quiet-work-updates|--quiet-work-update", opt_set_bool, &opt_quiet_work_updates, opt_hidden), OPT_WITH_ARG("--quota|-U", set_quota, NULL, NULL, "quota;URL combination for server with load-balance strategy quotas"), OPT_WITHOUT_ARG("--real-quiet", opt_set_bool, &opt_realquiet, "Disable all output"), OPT_WITH_ARG("--request-diff", set_request_diff, opt_show_floatval, &request_pdiff, "Request a specific difficulty from pools"), OPT_WITH_ARG("--retries", opt_set_intval, opt_show_intval, &opt_retries, "Number of times to retry failed submissions before giving up (-1 means never)"), OPT_WITH_ARG("--retry-pause", set_null, NULL, NULL, opt_hidden), OPT_WITH_ARG("--rotate", set_rotate, opt_show_intval, &opt_rotate_period, "Change multipool strategy from failover to regularly rotate at N minutes"), OPT_WITHOUT_ARG("--round-robin", set_rr, &pool_strategy, "Change multipool strategy from failover to round robin on failure"), OPT_WITH_ARG("--scan|-S", add_serial, NULL, NULL, "Configure how to scan for mining devices"), OPT_WITH_ARG("--scan-device|--scan-serial|--devscan", add_serial, NULL, NULL, opt_hidden), OPT_WITH_ARG("--scan-time|-s", set_int_0_to_9999, opt_show_intval, &opt_scantime, "Upper bound on time spent scanning current work, in seconds"), OPT_WITH_ARG("--scantime", set_int_0_to_9999, opt_show_intval, &opt_scantime, opt_hidden), OPT_WITH_ARG("--sched-start", set_schedtime, NULL, &schedstart, "Set a time of day in HH:MM to start mining (a once off without a stop time)"), OPT_WITH_ARG("--sched-stop", 
set_schedtime, NULL, &schedstop, "Set a time of day in HH:MM to stop mining (will quit without a start time)"), #ifdef USE_SCRYPT OPT_WITHOUT_ARG("--scrypt", opt_set_bool, &opt_scrypt, "Use the scrypt algorithm for mining (non-bitcoin)"), #endif OPT_WITH_ARG("--set-device", opt_string_elist_add, NULL, &opt_set_device_list, "Set default parameters on devices; eg, NFY:osc6_bits=50"), #if defined(USE_SCRYPT) && defined(HAVE_OPENCL) OPT_WITH_ARG("--shaders", set_shaders, NULL, NULL, "GPU shaders per card for tuning scrypt, comma separated"), #endif #ifdef HAVE_PWD_H OPT_WITH_ARG("--setuid", opt_set_charp, NULL, &opt_setuid, "Username of an unprivileged user to run as"), #endif OPT_WITH_ARG("--sharelog", set_sharelog, NULL, NULL, "Append share log to file"), OPT_WITH_ARG("--shares", opt_set_intval, NULL, &opt_shares, "Quit after mining N shares (default: unlimited)"), OPT_WITHOUT_ARG("--show-processors", opt_set_bool, &opt_show_procs, "Show per processor statistics in summary"), OPT_WITHOUT_ARG("--show-procs", opt_set_bool, &opt_show_procs, opt_hidden), OPT_WITH_ARG("--skip-security-checks", set_int_0_to_9999, NULL, &opt_skip_checks, "Skip security checks sometimes to save bandwidth; only check 1/th of the time (default: never skip)"), OPT_WITH_ARG("--socks-proxy", opt_set_charp, NULL, &opt_socks_proxy, "Set socks proxy (host:port)"), #ifdef USE_LIBEVENT OPT_WITH_ARG("--stratum-port", opt_set_intval, opt_show_intval, &stratumsrv_port, "Port number to listen on for stratum miners (-1 means disabled)"), #endif OPT_WITHOUT_ARG("--submit-stale", opt_set_bool, &opt_submit_stale, opt_hidden), OPT_WITH_ARG("--submit-threads", opt_set_intval, opt_show_intval, &opt_submit_threads, "Minimum number of concurrent share submissions (default: 64)"), #ifdef HAVE_SYSLOG_H OPT_WITHOUT_ARG("--syslog", opt_set_bool, &use_syslog, "Use system log for output messages (default: standard error)"), #endif OPT_WITH_ARG("--temp-cutoff", set_temp_cutoff, NULL, &opt_cutofftemp, "Maximum temperature devices will be allowed to reach before being disabled, one value or comma separated list"), OPT_WITH_ARG("--temp-hysteresis", set_int_1_to_10, opt_show_intval, &opt_hysteresis, "Set how much the temperature can fluctuate outside limits when automanaging speeds"), #ifdef HAVE_ADL OPT_WITH_ARG("--temp-overheat", set_temp_overheat, opt_show_intval, &opt_overheattemp, "Overheat temperature when automatically managing fan and GPU speeds, one value or comma separated list"), #endif OPT_WITH_ARG("--temp-target", set_temp_target, NULL, NULL, "Target temperature when automatically managing fan and clock speeds, one value or comma separated list"), OPT_WITHOUT_ARG("--text-only|-T", opt_set_invbool, &use_curses, #ifdef HAVE_CURSES "Disable ncurses formatted screen output" #else opt_hidden #endif ), #if defined(USE_SCRYPT) && defined(HAVE_OPENCL) OPT_WITH_ARG("--thread-concurrency", set_thread_concurrency, NULL, NULL, "Set GPU thread concurrency for scrypt mining, comma separated"), #endif #ifdef USE_UNICODE OPT_WITHOUT_ARG("--unicode", opt_set_bool, &use_unicode, "Use Unicode characters in TUI"), #endif OPT_WITH_ARG("--url|-o", set_url, NULL, NULL, "URL for bitcoin JSON-RPC server"), OPT_WITH_ARG("--user|-u", set_user, NULL, NULL, "Username for bitcoin JSON-RPC server"), #ifdef HAVE_OPENCL OPT_WITH_ARG("--vectors|-v", set_vector, NULL, NULL, "Override detected optimal vector (1, 2 or 4) - one value or comma separated list"), #endif OPT_WITHOUT_ARG("--verbose", opt_set_bool, &opt_log_output, "Log verbose output to stderr as well as status 
output"), OPT_WITHOUT_ARG("--weighed-stats", opt_set_bool, &opt_weighed_stats, "Display statistics weighed to difficulty 1"), #ifdef HAVE_OPENCL OPT_WITH_ARG("--worksize|-w", set_worksize, NULL, NULL, "Override detected optimal worksize - one value or comma separated list"), #endif OPT_WITHOUT_ARG("--unittest", opt_set_bool, &opt_unittest, opt_hidden), OPT_WITH_ARG("--userpass|-O", set_userpass, NULL, NULL, "Username:Password pair for bitcoin JSON-RPC server"), OPT_WITHOUT_ARG("--worktime", opt_set_bool, &opt_worktime, "Display extra work time debug information"), OPT_WITH_ARG("--pools", opt_set_bool, NULL, NULL, opt_hidden), OPT_ENDTABLE }; static char *load_config(const char *arg, void __maybe_unused *unused); static int fileconf_load; static char *parse_config(json_t *config, bool fileconf) { static char err_buf[200]; struct opt_table *opt; json_t *val; if (fileconf && !fileconf_load) fileconf_load = 1; for (opt = opt_config_table; opt->type != OPT_END; opt++) { char *p, *name, *sp; /* We don't handle subtables. */ assert(!(opt->type & OPT_SUBTABLE)); if (!opt->names) continue; /* Pull apart the option name(s). */ name = strdup(opt->names); for (p = strtok_r(name, "|", &sp); p; p = strtok_r(NULL, "|", &sp)) { char *err = "Invalid value"; /* Ignore short options. */ if (p[1] != '-') continue; val = json_object_get(config, p+2); if (!val) continue; if (opt->type & OPT_HASARG) { if (json_is_string(val)) { err = opt->cb_arg(json_string_value(val), opt->u.arg); } else if (json_is_number(val)) { char buf[256], *p, *q; snprintf(buf, 256, "%f", json_number_value(val)); if ( (p = strchr(buf, '.')) ) { // Trim /\.0*$/ to work properly with integer-only arguments q = p; while (*(++q) == '0') {} if (*q == '\0') *p = '\0'; } err = opt->cb_arg(buf, opt->u.arg); } else if (json_is_array(val)) { int n, size = json_array_size(val); err = NULL; for (n = 0; n < size && !err; n++) { if (json_is_string(json_array_get(val, n))) err = opt->cb_arg(json_string_value(json_array_get(val, n)), opt->u.arg); else if (json_is_object(json_array_get(val, n))) err = parse_config(json_array_get(val, n), false); } } } else if (opt->type & OPT_NOARG) { if (json_is_true(val)) err = opt->cb(opt->u.arg); else if (json_is_boolean(val)) { if (opt->cb == (void*)opt_set_bool) err = opt_set_invbool(opt->u.arg); else if (opt->cb == (void*)opt_set_invbool) err = opt_set_bool(opt->u.arg); } } if (err) { /* Allow invalid values to be in configuration * file, just skipping over them provided the * JSON is still valid after that. 
*/ if (fileconf) { applog(LOG_ERR, "Invalid config option %s: %s", p, err); fileconf_load = -1; } else { snprintf(err_buf, sizeof(err_buf), "Parsing JSON option %s: %s", p, err); return err_buf; } } } free(name); } val = json_object_get(config, JSON_INCLUDE_CONF); if (val && json_is_string(val)) return load_config(json_string_value(val), NULL); return NULL; } char *cnfbuf = NULL; static char *load_config(const char *arg, void __maybe_unused *unused) { json_error_t err; json_t *config; char *json_error; size_t siz; if (!cnfbuf) cnfbuf = strdup(arg); if (++include_count > JSON_MAX_DEPTH) return JSON_MAX_DEPTH_ERR; #if JANSSON_MAJOR_VERSION > 1 config = json_load_file(arg, 0, &err); #else config = json_load_file(arg, &err); #endif if (!json_is_object(config)) { siz = JSON_LOAD_ERROR_LEN + strlen(arg) + strlen(err.text); json_error = malloc(siz); if (!json_error) quit(1, "Malloc failure in json error"); snprintf(json_error, siz, JSON_LOAD_ERROR, arg, err.text); return json_error; } config_loaded = true; /* Parse the config now, so we can override it. That can keep pointers * so don't free config object. */ return parse_config(config, true); } static void load_default_config(void) { cnfbuf = malloc(PATH_MAX); #if defined(unix) if (getenv("HOME") && *getenv("HOME")) { strcpy(cnfbuf, getenv("HOME")); strcat(cnfbuf, "/"); } else strcpy(cnfbuf, ""); char *dirp = cnfbuf + strlen(cnfbuf); strcpy(dirp, ".bfgminer/"); strcat(dirp, def_conf); if (access(cnfbuf, R_OK)) // No BFGMiner config, try Cgminer's... strcpy(dirp, ".cgminer/cgminer.conf"); #else strcpy(cnfbuf, ""); strcat(cnfbuf, def_conf); #endif if (!access(cnfbuf, R_OK)) load_config(cnfbuf, NULL); else { free(cnfbuf); cnfbuf = NULL; } } extern const char *opt_argv0; static char *opt_verusage_and_exit(const char *extra) { puts(packagename); printf(" Drivers:%s\n", BFG_DRIVERLIST); printf(" Algorithms:%s\n", BFG_ALGOLIST); printf(" Options:%s\n", BFG_OPTLIST); printf("%s", opt_usage(opt_argv0, extra)); fflush(stdout); exit(0); } /* These options are available from commandline only */ static struct opt_table opt_cmdline_table[] = { OPT_WITH_ARG("--config|-c", load_config, NULL, NULL, "Load a JSON-format configuration file\n" "See example.conf for an example configuration."), OPT_WITHOUT_ARG("--help|-h", opt_verusage_and_exit, NULL, "Print this message"), #ifdef HAVE_OPENCL OPT_WITHOUT_ARG("--ndevs|-n", print_ndevs_and_exit, &nDevs, opt_hidden), #endif OPT_WITHOUT_ARG("--version|-V", opt_version_and_exit, packagename, "Display version and exit"), OPT_ENDTABLE }; static bool jobj_binary(const json_t *obj, const char *key, void *buf, size_t buflen, bool required) { const char *hexstr; json_t *tmp; tmp = json_object_get(obj, key); if (unlikely(!tmp)) { if (unlikely(required)) applog(LOG_ERR, "JSON key '%s' not found", key); return false; } hexstr = json_string_value(tmp); if (unlikely(!hexstr)) { applog(LOG_ERR, "JSON key '%s' is not a string", key); return false; } if (!hex2bin(buf, hexstr, buflen)) return false; return true; } static void calc_midstate(struct work *work) { union { unsigned char c[64]; uint32_t i[16]; } data; swap32yes(&data.i[0], work->data, 16); sha256_ctx ctx; sha256_init(&ctx); sha256_update(&ctx, data.c, 64); memcpy(work->midstate, ctx.h, sizeof(work->midstate)); swap32tole(work->midstate, work->midstate, 8); } static struct work *make_work(void) { struct work *work = calloc(1, sizeof(struct work)); if (unlikely(!work)) quit(1, "Failed to calloc work in make_work"); cg_wlock(&control_lock); work->id = total_work++;
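/* For reference (an illustrative sketch, not taken from example.conf): a
 * configuration file accepted by parse_config()/load_config() above maps long
 * option names (without the leading "--") to JSON values, e.g.
 *
 *   {
 *     "pools" : [
 *       { "url" : "http://pool.example.com:8332", "user" : "worker1", "pass" : "x" }
 *     ],
 *     "queue" : 1,
 *     "scan-time" : 60,
 *     "no-submit-stale" : true,
 *     "temp-cutoff" : "95,90"
 *   }
 *
 * Strings are handed to the option callback as-is; numbers are printed with %f
 * and any trailing ".0..." is trimmed for integer-only options; true/false
 * toggles argument-less options; and arrays may hold strings or nested objects,
 * which are parsed recursively (as the "pools" array is here). A key matching
 * JSON_INCLUDE_CONF pulls in another configuration file. */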
cg_wunlock(&control_lock); return work; } /* This is the central place all work that is about to be retired should be * cleaned to remove any dynamically allocated arrays within the struct */ void clean_work(struct work *work) { free(work->job_id); bytes_free(&work->nonce2); free(work->nonce1); if (work->tmpl) { struct pool *pool = work->pool; mutex_lock(&pool->pool_lock); bool free_tmpl = !--*work->tmpl_refcount; mutex_unlock(&pool->pool_lock); if (free_tmpl) { blktmpl_free(work->tmpl); free(work->tmpl_refcount); } } memset(work, 0, sizeof(struct work)); } /* All dynamically allocated work structs should be freed here to not leak any * ram from arrays allocated within the work struct */ void free_work(struct work *work) { clean_work(work); free(work); } static const char *workpadding_bin = "\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x80\x02\0\0"; // Must only be called with ch_lock held! static void __update_block_title(const unsigned char *hash_swap) { if (hash_swap) { char tmp[17]; // Only provided when the block has actually changed free(current_hash); current_hash = malloc(3 /* ... */ + 16 /* block hash segment */ + 1); bin2hex(tmp, &hash_swap[24], 8); memset(current_hash, '.', 3); memcpy(¤t_hash[3], tmp, 17); known_blkheight_current = false; } else if (likely(known_blkheight_current)) { return; } if (current_block_id == known_blkheight_blkid) { // FIXME: The block number will overflow this sometime around AD 2025-2027 if (known_blkheight < 1000000) { memmove(¤t_hash[3], ¤t_hash[11], 8); snprintf(¤t_hash[11], 20-11, " #%6u", known_blkheight); } known_blkheight_current = true; } } static void have_block_height(uint32_t block_id, uint32_t blkheight) { if (known_blkheight == blkheight) return; applog(LOG_DEBUG, "Learned that block id %08" PRIx32 " is height %" PRIu32, (uint32_t)be32toh(block_id), blkheight); cg_wlock(&ch_lock); known_blkheight = blkheight; known_blkheight_blkid = block_id; block_subsidy = 5000000000LL >> (blkheight / 210000); if (block_id == current_block_id) __update_block_title(NULL); cg_wunlock(&ch_lock); } static void pool_set_opaque(struct pool *pool, bool opaque) { if (pool->swork.opaque == opaque) return; pool->swork.opaque = opaque; if (opaque) applog(LOG_WARNING, "Pool %u is hiding block contents from us", pool->pool_no); else applog(LOG_NOTICE, "Pool %u now providing block contents to us", pool->pool_no); } static bool work_decode(struct pool *pool, struct work *work, json_t *val) { json_t *res_val = json_object_get(val, "result"); json_t *tmp_val; bool ret = false; if (unlikely(detect_algo == 1)) { json_t *tmp = json_object_get(res_val, "algorithm"); const char *v = tmp ? 
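/* Descriptive note on this step: detect_algo == 1 means the mining algorithm is
 * still unconfirmed; unless the pool's result advertises an "algorithm" string
 * beginning with "scrypt", detection falls through to SHA256d (detect_algo = 2)
 * just below. */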
json_string_value(tmp) : ""; if (strncasecmp(v, "scrypt", 6)) detect_algo = 2; } if (work->tmpl) { struct timeval tv_now; cgtime(&tv_now); const char *err = blktmpl_add_jansson(work->tmpl, res_val, tv_now.tv_sec); if (err) { applog(LOG_ERR, "blktmpl error: %s", err); return false; } work->rolltime = blkmk_time_left(work->tmpl, tv_now.tv_sec); #if BLKMAKER_VERSION > 1 if (opt_coinbase_script.sz) { bool newcb; #if BLKMAKER_VERSION > 2 blkmk_init_generation2(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz, &newcb); #else newcb = !work->tmpl->cbtxn; blkmk_init_generation(work->tmpl, opt_coinbase_script.data, opt_coinbase_script.sz); #endif if (newcb) { ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, &template_nonce, sizeof(template_nonce)); if (ae < (ssize_t)sizeof(template_nonce)) applog(LOG_WARNING, "Cannot append template-nonce to coinbase on pool %u (%"PRId64") - you might be wasting hashing!", work->pool->pool_no, (int64_t)ae); ++template_nonce; } } #endif #if BLKMAKER_VERSION > 0 { ssize_t ae = blkmk_append_coinbase_safe(work->tmpl, opt_coinbase_sig, 101); static bool appenderr = false; if (ae <= 0) { if (opt_coinbase_sig) { applog((appenderr ? LOG_DEBUG : LOG_WARNING), "Cannot append coinbase signature at all on pool %u (%"PRId64")", pool->pool_no, (int64_t)ae); appenderr = true; } } else if (ae >= 3 || opt_coinbase_sig) { const char *cbappend = opt_coinbase_sig; const char full[] = PACKAGE " " VERSION; if (!cbappend) { if ((size_t)ae >= sizeof(full) - 1) cbappend = full; else if ((size_t)ae >= sizeof(PACKAGE) - 1) cbappend = PACKAGE; else cbappend = "BFG"; } size_t cbappendsz = strlen(cbappend); static bool truncatewarning = false; if (cbappendsz <= (size_t)ae) { if (cbappendsz < (size_t)ae) // If we have space, include the trailing \0 ++cbappendsz; ae = cbappendsz; truncatewarning = false; } else { char *tmp = malloc(ae + 1); memcpy(tmp, opt_coinbase_sig, ae); tmp[ae] = '\0'; applog((truncatewarning ? LOG_DEBUG : LOG_WARNING), "Pool %u truncating appended coinbase signature at %"PRId64" bytes: %s(%s)", pool->pool_no, (int64_t)ae, tmp, &opt_coinbase_sig[ae]); free(tmp); truncatewarning = true; } ae = blkmk_append_coinbase_safe(work->tmpl, cbappend, ae); if (ae <= 0) { applog((appenderr ? 
LOG_DEBUG : LOG_WARNING), "Error appending coinbase signature (%"PRId64")", (int64_t)ae); appenderr = true; } else appenderr = false; } } #endif if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76) return false; swap32yes(work->data, work->data, 80 / 4); memcpy(&work->data[80], workpadding_bin, 48); const struct blktmpl_longpoll_req *lp; if ((lp = blktmpl_get_longpoll(work->tmpl)) && ((!pool->lp_id) || strcmp(lp->id, pool->lp_id))) { free(pool->lp_id); pool->lp_id = strdup(lp->id); #if 0 /* This just doesn't work :( */ curl_socket_t sock = pool->lp_socket; if (sock != CURL_SOCKET_BAD) { pool->lp_socket = CURL_SOCKET_BAD; applog(LOG_WARNING, "Pool %u long poll request hanging, reconnecting", pool->pool_no); shutdown(sock, SHUT_RDWR); } #endif } } else if (unlikely(!jobj_binary(res_val, "data", work->data, sizeof(work->data), true))) { applog(LOG_ERR, "JSON inval data"); return false; } if (!jobj_binary(res_val, "midstate", work->midstate, sizeof(work->midstate), false)) { // Calculate it ourselves applog(LOG_DEBUG, "Calculating midstate locally"); calc_midstate(work); } if (unlikely(!jobj_binary(res_val, "target", work->target, sizeof(work->target), true))) { applog(LOG_ERR, "JSON inval target"); return false; } if (work->tmpl) { for (size_t i = 0; i < sizeof(work->target) / 2; ++i) { int p = (sizeof(work->target) - 1) - i; unsigned char c = work->target[i]; work->target[i] = work->target[p]; work->target[p] = c; } } if ( (tmp_val = json_object_get(res_val, "height")) ) { uint32_t blkheight = json_number_value(tmp_val); uint32_t block_id = ((uint32_t*)work->data)[1]; have_block_height(block_id, blkheight); } memset(work->hash, 0, sizeof(work->hash)); cgtime(&work->tv_staged); pool_set_opaque(pool, !work->tmpl); ret = true; return ret; } /* Returns whether the pool supports local work generation or not. 
*/ static bool pool_localgen(struct pool *pool) { return (pool->last_work_copy || pool->has_stratum); } int dev_from_id(int thr_id) { struct cgpu_info *cgpu = get_thr_cgpu(thr_id); return cgpu->device_id; } /* Create an exponentially decaying average over the opt_log_interval */ void decay_time(double *f, double fadd, double fsecs) { double ftotal, fprop; fprop = 1.0 - 1 / (exp(fsecs / (double)opt_log_interval)); ftotal = 1.0 + fprop; *f += (fadd * fprop); *f /= ftotal; } static int __total_staged(void) { return HASH_COUNT(staged_work); } static int total_staged(void) { int ret; mutex_lock(stgd_lock); ret = __total_staged(); mutex_unlock(stgd_lock); return ret; } #ifdef HAVE_CURSES WINDOW *mainwin, *statuswin, *logwin; #endif double total_secs = 1.0; static char statusline[256]; /* logstart is where the log window should start */ static int devcursor, logstart, logcursor; #ifdef HAVE_CURSES /* statusy is where the status window goes up to in cases where it won't fit at startup */ static int statusy; static int devsummaryYOffset; static int total_lines; #endif #ifdef HAVE_OPENCL struct cgpu_info gpus[MAX_GPUDEVICES]; /* Maximum number apparently possible */ #endif struct cgpu_info *cpus; bool _bfg_console_cancel_disabled; int _bfg_console_prev_cancelstate; #ifdef HAVE_CURSES #define lock_curses() bfg_console_lock() #define unlock_curses() bfg_console_unlock() static bool curses_active_locked(void) { bool ret; lock_curses(); ret = curses_active; if (!ret) unlock_curses(); return ret; } // Cancellable getch int my_cancellable_getch(void) { // This only works because the macro only hits direct getch() calls typedef int (*real_getch_t)(void); const real_getch_t real_getch = __real_getch; int type, rv; bool sct; sct = !pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &type); rv = real_getch(); if (sct) pthread_setcanceltype(type, &type); return rv; } #ifdef PDCURSES static int bfg_wresize(WINDOW *win, int lines, int columns) { int rv = wresize(win, lines, columns); int x, y; getyx(win, y, x); if (unlikely(y >= lines || x >= columns)) { if (y >= lines) y = lines - 1; if (x >= columns) x = columns - 1; wmove(win, y, x); } return rv; } #else # define bfg_wresize wresize #endif #endif void tailsprintf(char *buf, size_t bufsz, const char *fmt, ...) { va_list ap; size_t presz = strlen(buf); va_start(ap, fmt); vsnprintf(&buf[presz], bufsz - presz, fmt, ap); va_end(ap); } double stats_elapsed(struct cgminer_stats *stats) { struct timeval now; double elapsed; if (stats->start_tv.tv_sec == 0) elapsed = total_secs; else { cgtime(&now); elapsed = tdiff(&now, &stats->start_tv); } if (elapsed < 1.0) elapsed = 1.0; return elapsed; } bool drv_ready(struct cgpu_info *cgpu) { switch (cgpu->status) { case LIFE_INIT: case LIFE_DEAD2: return false; default: return true; } } double cgpu_utility(struct cgpu_info *cgpu) { double dev_runtime = cgpu_runtime(cgpu); return cgpu->utility = cgpu->accepted / dev_runtime * 60; } /* Convert a uint64_t value into a truncated string for displaying with its * associated suitable for Mega, Giga etc. 
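 * For example (illustrative, sigdigits == 0): 1234567 becomes roughly "1.23M"
 * and 950 stays "950"; with a nonzero sigdigits the result is padded to a
 * fixed width so columns line up.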
Buf array needs to be long enough */ static void suffix_string(uint64_t val, char *buf, size_t bufsiz, int sigdigits) { const double dkilo = 1000.0; const uint64_t kilo = 1000ull; const uint64_t mega = 1000000ull; const uint64_t giga = 1000000000ull; const uint64_t tera = 1000000000000ull; const uint64_t peta = 1000000000000000ull; const uint64_t exa = 1000000000000000000ull; char suffix[2] = ""; bool decimal = true; double dval; if (val >= exa) { val /= peta; dval = (double)val / dkilo; strcpy(suffix, "E"); } else if (val >= peta) { val /= tera; dval = (double)val / dkilo; strcpy(suffix, "P"); } else if (val >= tera) { val /= giga; dval = (double)val / dkilo; strcpy(suffix, "T"); } else if (val >= giga) { val /= mega; dval = (double)val / dkilo; strcpy(suffix, "G"); } else if (val >= mega) { val /= kilo; dval = (double)val / dkilo; strcpy(suffix, "M"); } else if (val >= kilo) { dval = (double)val / dkilo; strcpy(suffix, "k"); } else { dval = val; decimal = false; } if (!sigdigits) { if (decimal) snprintf(buf, bufsiz, "%.3g%s", dval, suffix); else snprintf(buf, bufsiz, "%d%s", (unsigned int)dval, suffix); } else { /* Always show sigdigits + 1, padded on right with zeroes * followed by suffix */ int ndigits = sigdigits - 1 - (dval > 0.0 ? floor(log10(dval)) : 0); snprintf(buf, bufsiz, "%*.*f%s", sigdigits + 1, ndigits, dval, suffix); } } static float utility_to_hashrate(double utility) { return utility * 0x4444444; } static const char*_unitchar = "pn\xb5m kMGTPEZY?"; static const int _unitbase = 4; static void pick_unit(float hashrate, unsigned char *unit) { unsigned char i; if (hashrate == 0) { if (*unit < _unitbase) *unit = _unitbase; return; } hashrate *= 1e12; for (i = 0; i < *unit; ++i) hashrate /= 1e3; // 1000 but with tolerance for floating-point rounding, avoid showing "1000.0" while (hashrate >= 999.95) { hashrate /= 1e3; if (likely(_unitchar[*unit] != '?')) ++*unit; } } #define hashrate_pick_unit(hashrate, unit) pick_unit(hashrate, unit) enum h2bs_fmt { H2B_NOUNIT, // "xxx.x" H2B_SHORT, // "xxx.xMH/s" H2B_SPACED, // "xxx.x MH/s" }; static const size_t h2bs_fmt_size[] = {6, 10, 11}; enum bfu_floatprec { FUP_INTEGER, FUP_HASHES, FUP_BTC, }; static int format_unit3(char *buf, size_t sz, enum bfu_floatprec fprec, const char *measurement, enum h2bs_fmt fmt, float hashrate, signed char unitin) { char *s = buf; unsigned char prec, i, unit; int rv = 0; if (unitin == -1) { unit = 0; hashrate_pick_unit(hashrate, &unit); } else unit = unitin; hashrate *= 1e12; for (i = 0; i < unit; ++i) hashrate /= 1000; switch (fprec) { case FUP_HASHES: // 100 but with tolerance for floating-point rounding, max "99.99" then "100.0" if (hashrate >= 99.995 || unit < 6) prec = 1; else prec = 2; _SNP("%5.*f", prec, hashrate); break; case FUP_INTEGER: _SNP("%3d", (int)hashrate); break; case FUP_BTC: if (hashrate >= 99.995) prec = 0; else prec = 2; _SNP("%5.*f", prec, hashrate); } switch (fmt) { case H2B_SPACED: _SNP(" "); case H2B_SHORT: _SNP("%c%s", _unitchar[unit], measurement); default: break; } return rv; } #define format_unit2(buf, sz, floatprec, measurement, fmt, n, unit) \ format_unit3(buf, sz, floatprec ? FUP_HASHES : FUP_INTEGER, measurement, fmt, n, unit) static char *_multi_format_unit(char **buflist, size_t *bufszlist, bool floatprec, const char *measurement, enum h2bs_fmt fmt, const char *delim, int count, const float *numbers, bool isarray) { unsigned char unit = 0; bool allzero = true; int i; size_t delimsz = 0; char *buf = buflist[0]; size_t bufsz = bufszlist[0]; size_t itemwidth = (floatprec ? 
5 : 3); if (!isarray) delimsz = strlen(delim); for (i = 0; i < count; ++i) if (numbers[i] != 0) { pick_unit(numbers[i], &unit); allzero = false; } if (allzero) unit = _unitbase; --count; for (i = 0; i < count; ++i) { format_unit2(buf, bufsz, floatprec, NULL, H2B_NOUNIT, numbers[i], unit); if (isarray) { buf = buflist[i + 1]; bufsz = bufszlist[i + 1]; } else { buf += itemwidth; bufsz -= itemwidth; if (delimsz > bufsz) delimsz = bufsz; memcpy(buf, delim, delimsz); buf += delimsz; bufsz -= delimsz; } } // Last entry has the unit format_unit2(buf, bufsz, floatprec, measurement, fmt, numbers[count], unit); return buflist[0]; } #define multi_format_unit2(buf, bufsz, floatprec, measurement, fmt, delim, count, ...) _multi_format_unit((char *[]){buf}, (size_t[]){bufsz}, floatprec, measurement, fmt, delim, count, (float[]){ __VA_ARGS__ }, false) #define multi_format_unit_array2(buflist, bufszlist, floatprec, measurement, fmt, count, ...) (void)_multi_format_unit(buflist, bufszlist, floatprec, measurement, fmt, NULL, count, (float[]){ __VA_ARGS__ }, true) static int percentf3(char * const buf, size_t sz, double p, const double t) { char *s = buf; int rv = 0; if (!p) _SNP("none"); else if (t <= p) _SNP("100%%"); else { p /= t; if (p < 0.00995) // 0.01 but with tolerance for floating-point rounding, max ".99%" _SNP(".%02.0f%%", p * 10000); // ".01%" else if (p < 0.0995) // 0.1 but with tolerance for floating-point rounding, max "9.9%" _SNP("%.1f%%", p * 100); // "9.1%" else _SNP("%3.0f%%", p * 100); // " 99%" } return rv; } #define percentf4(buf, bufsz, p, t) percentf3(buf, bufsz, p, p + t) static void test_decimal_width() { // The pipe character at end of each line should perfectly line up char printbuf[512]; char testbuf1[64]; char testbuf2[64]; char testbuf3[64]; char testbuf4[64]; double testn; int width; int saved; // Hotspots around 0.1 and 0.01 saved = -1; for (testn = 0.09; testn <= 0.11; testn += 0.000001) { percentf3(testbuf1, sizeof(testbuf1), testn, 1.0); percentf3(testbuf2, sizeof(testbuf2), testn, 10.0); width = snprintf(printbuf, sizeof(printbuf), "%10g %s %s |", testn, testbuf1, testbuf2); if (unlikely((saved != -1) && (width != saved))) { applog(LOG_ERR, "Test width mismatch in percentf3! %d not %d at %10g", width, saved, testn); applog(LOG_ERR, "%s", printbuf); } saved = width; } // Hotspot around 100 (but test this in several units because format_unit2 also has unit<2 check) saved = -1; for (testn = 99.0; testn <= 101.0; testn += 0.0001) { format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn , -1); format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1); format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1); width = snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s |", testn, testbuf1, testbuf2, testbuf3); if (unlikely((saved != -1) && (width != saved))) { applog(LOG_ERR, "Test width mismatch in format_unit2! 
%d not %d at %10g", width, saved, testn); applog(LOG_ERR, "%s", printbuf); } saved = width; } // Hotspot around unit transition boundary in pick_unit saved = -1; for (testn = 999.0; testn <= 1001.0; testn += 0.0001) { format_unit2(testbuf1, sizeof(testbuf1), true, "x", H2B_SHORT, testn , -1); format_unit2(testbuf2, sizeof(testbuf2), true, "x", H2B_SHORT, testn * 1e3, -1); format_unit2(testbuf3, sizeof(testbuf3), true, "x", H2B_SHORT, testn * 1e6, -1); format_unit2(testbuf4, sizeof(testbuf4), true, "x", H2B_SHORT, testn * 1e9, -1); width = snprintf(printbuf, sizeof(printbuf), "%10g %s %s %s %s |", testn, testbuf1, testbuf2, testbuf3, testbuf4); if (unlikely((saved != -1) && (width != saved))) { applog(LOG_ERR, "Test width mismatch in pick_unit! %d not %d at %10g", width, saved, testn); applog(LOG_ERR, "%s", printbuf); } saved = width; } } #ifdef HAVE_CURSES static void adj_width(int var, int *length); #endif #ifdef HAVE_CURSES static int awidth = 1, rwidth = 1, swidth = 1, hwwidth = 1; static void format_statline(char *buf, size_t bufsz, const char *cHr, const char *aHr, const char *uHr, int accepted, int rejected, int stale, int wnotaccepted, int waccepted, int hwerrs, int badnonces, int allnonces) { char rejpcbuf[6]; char bnbuf[6]; adj_width(accepted, &awidth); adj_width(rejected, &rwidth); adj_width(stale, &swidth); adj_width(hwerrs, &hwwidth); percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted); percentf3(bnbuf, sizeof(bnbuf), badnonces, allnonces); tailsprintf(buf, bufsz, "%s/%s/%s | A:%*d R:%*d+%*d(%s) HW:%*d/%s", cHr, aHr, uHr, awidth, accepted, rwidth, rejected, swidth, stale, rejpcbuf, hwwidth, hwerrs, bnbuf ); } #endif static inline void temperature_column(char *buf, size_t bufsz, bool maybe_unicode, const float * const temp) { if (!(use_unicode && have_unicode_degrees)) maybe_unicode = false; if (temp && *temp > 0.) if (maybe_unicode) snprintf(buf, bufsz, "%4.1f"U8_DEGREE"C", *temp); else snprintf(buf, bufsz, "%4.1fC", *temp); else { if (temp) snprintf(buf, bufsz, " "); if (maybe_unicode) tailsprintf(buf, bufsz, " "); } tailsprintf(buf, bufsz, " | "); } void get_statline3(char *buf, size_t bufsz, struct cgpu_info *cgpu, bool for_curses, bool opt_show_procs) { #ifndef HAVE_CURSES assert(for_curses == false); #endif struct device_drv *drv = cgpu->drv; enum h2bs_fmt hashrate_style = for_curses ? 
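/* On the curses display, the per-device tail assembled by format_statline()
 * above looks roughly like (values illustrative):
 *   52.3/51.8/50.9Mh/s | A:341 R:2+1(.87%) HW:0/none
 * i.e. current/average/effective hashrate sharing one unit, accepted shares,
 * rejected+stale shares with their percentage, and hardware errors with the
 * bad-nonce ratio. */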
H2B_SHORT : H2B_SPACED; char cHr[h2bs_fmt_size[H2B_NOUNIT]], aHr[h2bs_fmt_size[H2B_NOUNIT]], uHr[h2bs_fmt_size[hashrate_style]]; char rejpcbuf[6]; char bnbuf[6]; double dev_runtime; if (!opt_show_procs) cgpu = cgpu->device; dev_runtime = cgpu_runtime(cgpu); double rolling, mhashes; int accepted, rejected, stale; double waccepted; double wnotaccepted; int hwerrs, badnonces, goodnonces; rolling = mhashes = waccepted = wnotaccepted = 0; accepted = rejected = stale = hwerrs = badnonces = goodnonces = 0; { struct cgpu_info *slave = cgpu; for (int i = 0; i < cgpu->procs; ++i, (slave = slave->next_proc)) { slave->utility = slave->accepted / dev_runtime * 60; slave->utility_diff1 = slave->diff_accepted / dev_runtime * 60; rolling += slave->rolling; mhashes += slave->total_mhashes; if (opt_weighed_stats) { accepted += slave->diff_accepted; rejected += slave->diff_rejected; stale += slave->diff_stale; } else { accepted += slave->accepted; rejected += slave->rejected; stale += slave->stale; } waccepted += slave->diff_accepted; wnotaccepted += slave->diff_rejected + slave->diff_stale; hwerrs += slave->hw_errors; badnonces += slave->bad_nonces; goodnonces += slave->diff1; if (opt_show_procs) break; } } double wtotal = (waccepted + wnotaccepted); multi_format_unit_array2( ((char*[]){cHr, aHr, uHr}), ((size_t[]){h2bs_fmt_size[H2B_NOUNIT], h2bs_fmt_size[H2B_NOUNIT], h2bs_fmt_size[hashrate_style]}), true, "h/s", hashrate_style, 3, 1e6*rolling, 1e6*mhashes / dev_runtime, utility_to_hashrate(goodnonces * (wtotal ? (waccepted / wtotal) : 1) * 60 / dev_runtime)); // Processor representation #ifdef HAVE_CURSES if (for_curses) { if (opt_show_procs) snprintf(buf, bufsz, " %"PRIprepr": ", cgpu->proc_repr); else snprintf(buf, bufsz, " %s: ", cgpu->dev_repr); } else #endif snprintf(buf, bufsz, "%s ", opt_show_procs ? cgpu->proc_repr_ns : cgpu->dev_repr_ns); if (unlikely(cgpu->status == LIFE_INIT)) { tailsprintf(buf, bufsz, "Initializing..."); return; } { const size_t bufln = strlen(buf); const size_t abufsz = (bufln >= bufsz) ? 
0 : (bufsz - bufln); if (likely(cgpu->status != LIFE_DEAD2) && drv->override_statline_temp2 && drv->override_statline_temp2(buf, bufsz, cgpu, opt_show_procs)) temperature_column(&buf[bufln], abufsz, for_curses, NULL); else { float temp = cgpu->temp; if (!opt_show_procs) { // Find the highest temperature of all processors struct cgpu_info *proc = cgpu; for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc)) if (proc->temp > temp) temp = proc->temp; } temperature_column(&buf[bufln], abufsz, for_curses, &temp); } } #ifdef HAVE_CURSES if (for_curses) { const char *cHrStatsOpt[] = {AS_BAD("DEAD "), AS_BAD("SICK "), "OFF ", AS_BAD("REST "), AS_BAD(" ERR "), AS_BAD("WAIT "), cHr}; const char *cHrStats; int cHrStatsI = (sizeof(cHrStatsOpt) / sizeof(*cHrStatsOpt)) - 1; bool all_dead = true, all_off = true, all_rdrv = true; struct cgpu_info *proc = cgpu; for (int i = 0; i < cgpu->procs; ++i, (proc = proc->next_proc)) { switch (cHrStatsI) { default: if (proc->status == LIFE_WAIT) cHrStatsI = 5; case 5: if (proc->deven == DEV_RECOVER_ERR) cHrStatsI = 4; case 4: if (proc->deven == DEV_RECOVER) cHrStatsI = 3; case 3: if (proc->status == LIFE_SICK || proc->status == LIFE_DEAD || proc->status == LIFE_DEAD2) { cHrStatsI = 1; all_off = false; } else { if (likely(proc->deven == DEV_ENABLED)) all_off = false; if (proc->deven != DEV_RECOVER_DRV) all_rdrv = false; } case 1: break; } if (likely(proc->status != LIFE_DEAD && proc->status != LIFE_DEAD2)) all_dead = false; if (opt_show_procs) break; } if (unlikely(all_dead)) cHrStatsI = 0; else if (unlikely(all_off)) cHrStatsI = 2; cHrStats = cHrStatsOpt[cHrStatsI]; if (cHrStatsI == 2 && all_rdrv) cHrStats = " RST "; format_statline(buf, bufsz, cHrStats, aHr, uHr, accepted, rejected, stale, wnotaccepted, waccepted, hwerrs, badnonces, badnonces + goodnonces); } else #endif { percentf4(rejpcbuf, sizeof(rejpcbuf), wnotaccepted, waccepted); percentf4(bnbuf, sizeof(bnbuf), badnonces, goodnonces); tailsprintf(buf, bufsz, "%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s", opt_log_interval, cHr, aHr, uHr, accepted, rejected, stale, rejpcbuf, hwerrs, bnbuf ); } } #define get_statline(buf, bufsz, cgpu) get_statline3(buf, bufsz, cgpu, false, opt_show_procs) #define get_statline2(buf, bufsz, cgpu, for_curses) get_statline3(buf, bufsz, cgpu, for_curses, opt_show_procs) static void text_print_status(int thr_id) { struct cgpu_info *cgpu; char logline[256]; cgpu = get_thr_cgpu(thr_id); if (cgpu) { get_statline(logline, sizeof(logline), cgpu); printf("%s\n", logline); } } #ifdef HAVE_CURSES static int attr_bad = A_BOLD; #ifdef WIN32 #define swprintf snwprintf #endif static void bfg_waddstr(WINDOW *win, const char *s) { const char *p = s; int32_t w; int wlen; unsigned char stop_ascii = (use_unicode ? '|' : 0x80); while (true) { while (likely(p[0] == '\n' || (p[0] >= 0x20 && p[0] < stop_ascii))) { // Printable ASCII ++p; } if (p != s) waddnstr(win, s, p - s); w = utf8_decode(p, &wlen); if (unlikely(p[0] == '\xb5')) // HACK for Mu (SI prefix micro-) { w = unicode_micro; wlen = 1; } s = p += wlen; switch(w) { // NOTE: U+F000-U+F7FF are reserved for font hacks case '\0': return; case 0xf000: // "bad" off wattroff(win, attr_bad); break; case 0xf001: // "bad" on wattron(win, attr_bad); break; #ifdef USE_UNICODE case '|': wadd_wch(win, WACS_VLINE); break; #endif case 0x2500: // BOX DRAWINGS LIGHT HORIZONTAL case 0x2534: // BOX DRAWINGS LIGHT UP AND HORIZONTAL if (!use_unicode) { waddch(win, '-'); break; } #ifdef USE_UNICODE wadd_wch(win, (w == 0x2500) ? 
WACS_HLINE : WACS_BTEE); break; #endif case 0x2022: if (w > WCHAR_MAX || !iswprint(w)) w = '*'; default: if (w > WCHAR_MAX || !(iswprint(w) || w == '\n')) { #if REPLACEMENT_CHAR <= WCHAR_MAX if (iswprint(REPLACEMENT_CHAR)) w = REPLACEMENT_CHAR; else #endif w = '?'; } { #ifdef USE_UNICODE wchar_t wbuf[0x10]; int wbuflen = sizeof(wbuf) / sizeof(*wbuf); wbuflen = swprintf(wbuf, wbuflen, L"%lc", (wint_t)w); waddnwstr(win, wbuf, wbuflen); #else wprintw(win, "%lc", (wint_t)w); #endif } } } } static inline void bfg_hline(WINDOW *win, int y) { int maxx, __maybe_unused maxy; getmaxyx(win, maxy, maxx); #ifdef USE_UNICODE if (use_unicode) mvwhline_set(win, y, 0, WACS_HLINE, maxx); else #endif mvwhline(win, y, 0, '-', maxx); } // Spaces until end of line, using current attributes (ie, not completely clear) static void bfg_wspctoeol(WINDOW * const win, const int offset) { int x, maxx; int __maybe_unused y; getmaxyx(win, y, maxx); getyx(win, y, x); const int space_count = (maxx - x) - offset; // Check for negative - terminal too narrow if (space_count <= 0) return; char buf[space_count]; memset(buf, ' ', space_count); waddnstr(win, buf, space_count); } static int menu_attr = A_REVERSE; #define CURBUFSIZ 256 #define cg_mvwprintw(win, y, x, fmt, ...) do { \ char tmp42[CURBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ wmove(win, y, x); \ bfg_waddstr(win, tmp42); \ } while (0) #define cg_wprintw(win, fmt, ...) do { \ char tmp42[CURBUFSIZ]; \ snprintf(tmp42, sizeof(tmp42), fmt, ##__VA_ARGS__); \ bfg_waddstr(win, tmp42); \ } while (0) /* Must be called with curses mutex lock held and curses_active */ static void curses_print_status(const int ts) { struct pool *pool = currentpool; struct timeval now, tv; float efficiency; double income; int logdiv; efficiency = total_bytes_xfer ? total_diff_accepted * 2048. / total_bytes_xfer : 0.0; wattron(statuswin, attr_title); cg_mvwprintw(statuswin, 0, 0, " " PACKAGE " version " VERSION " - Started: %s", datestamp); timer_set_now(&now); { unsigned int days, hours; div_t d; timersub(&now, &miner_started, &tv); d = div(tv.tv_sec, 86400); days = d.quot; d = div(d.rem, 3600); hours = d.quot; d = div(d.rem, 60); cg_wprintw(statuswin, " - [%3u day%c %02d:%02d:%02d]" , days , (days == 1) ? ' ' : 's' , hours , d.quot , d.rem ); } bfg_wspctoeol(statuswin, 0); wattroff(statuswin, attr_title); wattron(statuswin, menu_attr); wmove(statuswin, 1, 0); bfg_waddstr(statuswin, " [M]anage devices [P]ool management [S]ettings [D]isplay options "); bfg_wspctoeol(statuswin, 14); bfg_waddstr(statuswin, "[H]elp [Q]uit "); wattroff(statuswin, menu_attr); if ((pool_strategy == POOL_LOADBALANCE || pool_strategy == POOL_BALANCE) && total_pools > 1) { cg_mvwprintw(statuswin, 2, 0, " Connected to multiple pools with%s block change notify", have_longpoll ? "": "out"); } else if (pool->has_stratum) { cg_mvwprintw(statuswin, 2, 0, " Connected to %s diff %s with stratum as user %s", pool->sockaddr_url, pool->diff, pool->rpc_user); } else { cg_mvwprintw(statuswin, 2, 0, " Connected to %s diff %s with%s LP as user %s", pool->sockaddr_url, pool->diff, have_longpoll ? 
"": "out", pool->rpc_user); } wclrtoeol(statuswin); cg_mvwprintw(statuswin, 3, 0, " Block: %s Diff:%s (%s) Started: %s", current_hash, block_diff, net_hashrate, blocktime); income = total_diff_accepted * 3600 * block_subsidy / total_secs / current_diff; char bwstr[12], incomestr[13]; format_unit3(incomestr, sizeof(incomestr), FUP_BTC, "BTC/hr", H2B_SHORT, income/1e8, -1); cg_mvwprintw(statuswin, 4, 0, " ST:%d F:%d NB:%d AS:%d BW:[%s] E:%.2f I:%s BS:%s", ts, total_go + total_ro, new_blocks, total_submitting, multi_format_unit2(bwstr, sizeof(bwstr), false, "B/s", H2B_SHORT, "/", 2, (float)(total_bytes_rcvd / total_secs), (float)(total_bytes_sent / total_secs)), efficiency, incomestr, best_share); wclrtoeol(statuswin); mvwaddstr(statuswin, 5, 0, " "); bfg_waddstr(statuswin, statusline); wclrtoeol(statuswin); logdiv = statusy - 1; bfg_hline(statuswin, 6); bfg_hline(statuswin, logdiv); #ifdef USE_UNICODE if (use_unicode) { int offset = 8 /* device */ + 5 /* temperature */ + 1 /* padding space */; if (opt_show_procs && !opt_compact) ++offset; // proc letter if (have_unicode_degrees) ++offset; // degrees symbol mvwadd_wch(statuswin, 6, offset, WACS_PLUS); mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE); offset += 24; // hashrates etc mvwadd_wch(statuswin, 6, offset, WACS_PLUS); mvwadd_wch(statuswin, logdiv, offset, WACS_BTEE); } #endif } static void adj_width(int var, int *length) { if ((int)(log10(var) + 1) > *length) (*length)++; } static int dev_width; static void curses_print_devstatus(struct cgpu_info *cgpu) { char logline[256]; int ypos; if (opt_compact) return; /* Check this isn't out of the window size */ if (opt_show_procs) ypos = cgpu->cgminer_id; else { if (cgpu->proc_id) return; ypos = cgpu->device_line_id; } ypos += devsummaryYOffset; if (ypos < 0) return; ypos += devcursor - 1; if (ypos >= statusy - 1) return; if (wmove(statuswin, ypos, 0) == ERR) return; get_statline2(logline, sizeof(logline), cgpu, true); if (selecting_device && (opt_show_procs ? (selected_device == cgpu->cgminer_id) : (devices[selected_device]->device == cgpu))) wattron(statuswin, A_REVERSE); bfg_waddstr(statuswin, logline); wattroff(statuswin, A_REVERSE); wclrtoeol(statuswin); } static void _refresh_devstatus(const bool already_have_lock) { if ((!opt_compact) && (already_have_lock || curses_active_locked())) { int i; if (unlikely(!total_devices)) { const int ypos = devcursor - 1; if (ypos < statusy - 1 && wmove(statuswin, ypos, 0) != ERR) { wattron(statuswin, attr_bad); bfg_waddstr(statuswin, "NO DEVICES FOUND: Press 'M' and '+' to add"); wclrtoeol(statuswin); wattroff(statuswin, attr_bad); } } for (i = 0; i < total_devices; i++) curses_print_devstatus(get_devices(i)); touchwin(statuswin); wrefresh(statuswin); if (!already_have_lock) unlock_curses(); } } #define refresh_devstatus() _refresh_devstatus(false) #endif static void print_status(int thr_id) { if (!curses_active) text_print_status(thr_id); } #ifdef HAVE_CURSES static bool set_statusy(int maxy) { if (loginput_size) { maxy -= loginput_size; if (maxy < 0) maxy = 0; } if (logstart < maxy) maxy = logstart; if (statusy == maxy) return false; statusy = maxy; logcursor = statusy; return true; } /* Check for window resize. 
Called with curses mutex locked */ static inline void change_logwinsize(void) { int x, y, logx, logy; getmaxyx(mainwin, y, x); if (x < 80 || y < 25) return; if (y > statusy + 2 && statusy < logstart) { set_statusy(y - 2); mvwin(logwin, logcursor, 0); bfg_wresize(statuswin, statusy, x); } y -= logcursor; getmaxyx(logwin, logy, logx); /* Detect screen size change */ if (x != logx || y != logy) bfg_wresize(logwin, y, x); } static void check_winsizes(void) { if (!use_curses) return; if (curses_active_locked()) { int y, x; x = getmaxx(statuswin); if (set_statusy(LINES - 2)) { erase(); bfg_wresize(statuswin, statusy, x); getmaxyx(mainwin, y, x); y -= logcursor; bfg_wresize(logwin, y, x); mvwin(logwin, logcursor, 0); } unlock_curses(); } } static int device_line_id_count; static void switch_logsize(void) { if (curses_active_locked()) { if (opt_compact) { logstart = devcursor - 1; logcursor = logstart + 1; } else { total_lines = (opt_show_procs ? total_devices : device_line_id_count) ?: 1; logstart = devcursor + total_lines; logcursor = logstart; } unlock_curses(); } check_winsizes(); } /* For mandatory printing when mutex is already locked */ void _wlog(const char *str) { static bool newline; size_t end = strlen(str) - 1; if (newline) bfg_waddstr(logwin, "\n"); if (str[end] == '\n') { char *s; newline = true; s = alloca(end + 1); memcpy(s, str, end); s[end] = '\0'; str = s; } else newline = false; bfg_waddstr(logwin, str); } /* Mandatory printing */ void _wlogprint(const char *str) { if (curses_active_locked()) { _wlog(str); unlock_curses(); } } #endif #ifdef HAVE_CURSES bool _log_curses_only(int prio, const char *datetime, const char *str) { bool high_prio; high_prio = (prio == LOG_WARNING || prio == LOG_ERR); if (curses_active) { if (!loginput_size || high_prio) { wlog(" %s %s\n", datetime, str); if (high_prio) { touchwin(logwin); wrefresh(logwin); } } return true; } return false; } void clear_logwin(void) { if (curses_active_locked()) { wclear(logwin); unlock_curses(); } } void logwin_update(void) { if (curses_active_locked()) { touchwin(logwin); wrefresh(logwin); unlock_curses(); } } #endif static void enable_pool(struct pool *pool) { if (pool->enabled != POOL_ENABLED) { enabled_pools++; pool->enabled = POOL_ENABLED; } } #ifdef HAVE_CURSES static void disable_pool(struct pool *pool) { if (pool->enabled == POOL_ENABLED) enabled_pools--; pool->enabled = POOL_DISABLED; } #endif static void reject_pool(struct pool *pool) { if (pool->enabled == POOL_ENABLED) enabled_pools--; pool->enabled = POOL_REJECTING; } static uint64_t share_diff(const struct work *); static void share_result_msg(const struct work *work, const char *disp, const char *reason, bool resubmit, const char *worktime) { struct cgpu_info *cgpu; const unsigned char *hashpart = &work->hash[opt_scrypt ? 26 : 24]; char shrdiffdisp[16]; int tgtdiff = floor(work->work_difficulty); char tgtdiffdisp[16]; char where[20]; cgpu = get_thr_cgpu(work->thr_id); suffix_string(work->share_diff, shrdiffdisp, sizeof(shrdiffdisp), 0); suffix_string(tgtdiff, tgtdiffdisp, sizeof(tgtdiffdisp), 0); if (total_pools > 1) snprintf(where, sizeof(where), " pool %d", work->pool->pool_no); else where[0] = '\0'; applog(LOG_NOTICE, "%s %02x%02x%02x%02x %"PRIprepr"%s Diff %s/%s%s %s%s", disp, (unsigned)hashpart[3], (unsigned)hashpart[2], (unsigned)hashpart[1], (unsigned)hashpart[0], cgpu->proc_repr, where, shrdiffdisp, tgtdiffdisp, reason, resubmit ? 
"(resubmit)" : "", worktime ); } static bool test_work_current(struct work *); static void _submit_work_async(struct work *); static void maybe_local_submit(const struct work *work) { #if BLKMAKER_VERSION > 3 if (unlikely(work->block && work->tmpl)) { // This is a block with a full template (GBT) // Regardless of the result, submit to local bitcoind(s) as well struct work *work_cp; char *p; for (int i = 0; i < total_pools; ++i) { p = strchr(pools[i]->rpc_url, '#'); if (likely(!(p && strstr(&p[1], "allblocks")))) continue; applog(LOG_DEBUG, "Attempting submission of full block to pool %d", pools[i]->pool_no); work_cp = copy_work(work); work_cp->pool = pools[i]; work_cp->do_foreign_submit = true; _submit_work_async(work_cp); } } #endif } /* Theoretically threads could race when modifying accepted and * rejected values but the chance of two submits completing at the * same time is zero so there is no point adding extra locking */ static void share_result(json_t *val, json_t *res, json_t *err, const struct work *work, /*char *hashshow,*/ bool resubmit, char *worktime) { struct pool *pool = work->pool; struct cgpu_info *cgpu; cgpu = get_thr_cgpu(work->thr_id); if ((json_is_null(err) || !err) && (json_is_null(res) || json_is_true(res))) { mutex_lock(&stats_lock); cgpu->accepted++; total_accepted++; pool->accepted++; cgpu->diff_accepted += work->work_difficulty; total_diff_accepted += work->work_difficulty; pool->diff_accepted += work->work_difficulty; mutex_unlock(&stats_lock); pool->seq_rejects = 0; cgpu->last_share_pool = pool->pool_no; cgpu->last_share_pool_time = time(NULL); cgpu->last_share_diff = work->work_difficulty; pool->last_share_time = cgpu->last_share_pool_time; pool->last_share_diff = work->work_difficulty; applog(LOG_DEBUG, "PROOF OF WORK RESULT: true (yay!!!)"); if (!QUIET) { share_result_msg(work, "Accepted", "", resubmit, worktime); } sharelog("accept", work); if (opt_shares && total_diff_accepted >= opt_shares) { applog(LOG_WARNING, "Successfully mined %d accepted shares as requested and exiting.", opt_shares); kill_work(); return; } /* Detect if a pool that has been temporarily disabled for * continually rejecting shares has started accepting shares. 
* This will only happen with the work returned from a * longpoll */ if (unlikely(pool->enabled == POOL_REJECTING)) { applog(LOG_WARNING, "Rejecting pool %d now accepting shares, re-enabling!", pool->pool_no); enable_pool(pool); switch_pools(NULL); } if (unlikely(work->block)) { // Force moving on to this new block :) struct work fakework; memset(&fakework, 0, sizeof(fakework)); fakework.pool = work->pool; // Copy block version, bits, and time from share memcpy(&fakework.data[ 0], &work->data[ 0], 4); memcpy(&fakework.data[68], &work->data[68], 8); // Set prevblock to winning hash (swap32'd) swap32yes(&fakework.data[4], &work->hash[0], 32 / 4); test_work_current(&fakework); } } else { mutex_lock(&stats_lock); cgpu->rejected++; total_rejected++; pool->rejected++; cgpu->diff_rejected += work->work_difficulty; total_diff_rejected += work->work_difficulty; pool->diff_rejected += work->work_difficulty; pool->seq_rejects++; mutex_unlock(&stats_lock); applog(LOG_DEBUG, "PROOF OF WORK RESULT: false (booooo)"); if (!QUIET) { char where[20]; char disposition[36] = "reject"; char reason[32]; strcpy(reason, ""); if (total_pools > 1) snprintf(where, sizeof(where), "pool %d", work->pool->pool_no); else strcpy(where, ""); if (!json_is_string(res)) res = json_object_get(val, "reject-reason"); if (res) { const char *reasontmp = json_string_value(res); size_t reasonLen = strlen(reasontmp); if (reasonLen > 28) reasonLen = 28; reason[0] = ' '; reason[1] = '('; memcpy(2 + reason, reasontmp, reasonLen); reason[reasonLen + 2] = ')'; reason[reasonLen + 3] = '\0'; memcpy(disposition + 7, reasontmp, reasonLen); disposition[6] = ':'; disposition[reasonLen + 7] = '\0'; } else if (work->stratum && err && json_is_array(err)) { json_t *reason_val = json_array_get(err, 1); char *reason_str; if (reason_val && json_is_string(reason_val)) { reason_str = (char *)json_string_value(reason_val); snprintf(reason, 31, " (%s)", reason_str); } } share_result_msg(work, "Rejected", reason, resubmit, worktime); sharelog(disposition, work); } /* Once we have more than a nominal amount of sequential rejects, * at least 10 and more than 3 mins at the current utility, * disable the pool because some pool error is likely to have * ensued. Do not do this if we know the share just happened to * be stale due to networking delays. 
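 * For example (illustrative): at an overall utility of 20 accepted shares per
 * minute, a pool is disabled only after more than 60 rejects in a row
 * (seq_rejects > utility * 3), and only when pool disabling is enabled and
 * more than one pool is active.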
*/ if (pool->seq_rejects > 10 && !work->stale && opt_disable_pool && enabled_pools > 1) { double utility = total_accepted / total_secs * 60; if (pool->seq_rejects > utility * 3) { applog(LOG_WARNING, "Pool %d rejected %d sequential shares, disabling!", pool->pool_no, pool->seq_rejects); reject_pool(pool); if (pool == current_pool()) switch_pools(NULL); pool->seq_rejects = 0; } } } maybe_local_submit(work); } static char *submit_upstream_work_request(struct work *work) { char *hexstr = NULL; char *s, *sd; struct pool *pool = work->pool; if (work->tmpl) { json_t *req; unsigned char data[80]; swap32yes(data, work->data, 80 / 4); #if BLKMAKER_VERSION > 3 if (work->do_foreign_submit) req = blkmk_submit_foreign_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76]))); else #endif req = blkmk_submit_jansson(work->tmpl, data, work->dataid, le32toh(*((uint32_t*)&work->data[76]))); s = json_dumps(req, 0); json_decref(req); sd = malloc(161); bin2hex(sd, data, 80); } else { /* build hex string */ hexstr = malloc((sizeof(work->data) * 2) + 1); bin2hex(hexstr, work->data, sizeof(work->data)); /* build JSON-RPC request */ s = strdup("{\"method\": \"getwork\", \"params\": [ \""); s = realloc_strcat(s, hexstr); s = realloc_strcat(s, "\" ], \"id\":1}"); free(hexstr); sd = s; } applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->rpc_url, sd); if (work->tmpl) free(sd); else s = realloc_strcat(s, "\n"); return s; } static bool submit_upstream_work_completed(struct work *work, bool resubmit, struct timeval *ptv_submit, json_t *val) { json_t *res, *err; bool rc = false; int thr_id = work->thr_id; struct pool *pool = work->pool; struct timeval tv_submit_reply; time_t ts_submit_reply; char worktime[200] = ""; cgtime(&tv_submit_reply); ts_submit_reply = time(NULL); if (unlikely(!val)) { applog(LOG_INFO, "submit_upstream_work json_rpc_call failed"); if (!pool_tset(pool, &pool->submit_fail)) { total_ro++; pool->remotefail_occasions++; applog(LOG_WARNING, "Pool %d communication failure, caching submissions", pool->pool_no); } goto out; } else if (pool_tclear(pool, &pool->submit_fail)) applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no); res = json_object_get(val, "result"); err = json_object_get(val, "error"); if (!QUIET) { if (opt_worktime) { char workclone[20]; struct tm _tm; struct tm *tm, tm_getwork, tm_submit_reply; tm = &_tm; double getwork_time = tdiff((struct timeval *)&(work->tv_getwork_reply), (struct timeval *)&(work->tv_getwork)); double getwork_to_work = tdiff((struct timeval *)&(work->tv_work_start), (struct timeval *)&(work->tv_getwork_reply)); double work_time = tdiff((struct timeval *)&(work->tv_work_found), (struct timeval *)&(work->tv_work_start)); double work_to_submit = tdiff(ptv_submit, (struct timeval *)&(work->tv_work_found)); double submit_time = tdiff(&tv_submit_reply, ptv_submit); int diffplaces = 3; localtime_r(&work->ts_getwork, tm); memcpy(&tm_getwork, tm, sizeof(struct tm)); localtime_r(&ts_submit_reply, tm); memcpy(&tm_submit_reply, tm, sizeof(struct tm)); if (work->clone) { snprintf(workclone, sizeof(workclone), "C:%1.3f", tdiff((struct timeval *)&(work->tv_cloned), (struct timeval *)&(work->tv_getwork_reply))); } else strcpy(workclone, "O"); if (work->work_difficulty < 1) diffplaces = 6; snprintf(worktime, sizeof(worktime), " <-%08lx.%08lx M:%c D:%1.*f G:%02d:%02d:%02d:%1.3f %s (%1.3f) W:%1.3f (%1.3f) S:%1.3f R:%02d:%02d:%02d", (unsigned long)be32toh(*(uint32_t *)&(work->data[opt_scrypt ? 
32 : 28])), (unsigned long)be32toh(*(uint32_t *)&(work->data[opt_scrypt ? 28 : 24])), work->getwork_mode, diffplaces, work->work_difficulty, tm_getwork.tm_hour, tm_getwork.tm_min, tm_getwork.tm_sec, getwork_time, workclone, getwork_to_work, work_time, work_to_submit, submit_time, tm_submit_reply.tm_hour, tm_submit_reply.tm_min, tm_submit_reply.tm_sec); } } share_result(val, res, err, work, resubmit, worktime); if (!opt_realquiet) print_status(thr_id); if (!want_per_device_stats) { char logline[256]; struct cgpu_info *cgpu; cgpu = get_thr_cgpu(thr_id); get_statline(logline, sizeof(logline), cgpu); applog(LOG_INFO, "%s", logline); } json_decref(val); rc = true; out: return rc; } /* Specifies whether we can use this pool for work or not. */ static bool pool_unworkable(struct pool *pool) { if (pool->idle) return true; if (pool->enabled != POOL_ENABLED) return true; if (pool->has_stratum && !pool->stratum_active) return true; return false; } /* In balanced mode, the amount of diff1 solutions per pool is monitored as a * rolling average per 10 minutes and if pools start getting more, it biases * away from them to distribute work evenly. The share count is reset to the * rolling average every 10 minutes to not send all work to one pool after it * has been disabled/out for an extended period. */ static struct pool *select_balanced(struct pool *cp) { int i, lowest = cp->shares; struct pool *ret = cp; for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (pool_unworkable(pool)) continue; if (pool->shares < lowest) { lowest = pool->shares; ret = pool; } } ret->shares++; return ret; } static bool pool_active(struct pool *, bool pinging); static void pool_died(struct pool *); static struct pool *priority_pool(int choice); static bool pool_unusable(struct pool *pool); /* Select any active pool in a rotating fashion when loadbalance is chosen if * it has any quota left. */ static inline struct pool *select_pool(bool lagging) { static int rotating_pool = 0; struct pool *pool, *cp; bool avail = false; int tested, i; cp = current_pool(); retry: if (pool_strategy == POOL_BALANCE) { pool = select_balanced(cp); goto out; } if (pool_strategy != POOL_LOADBALANCE && (!lagging || opt_fail_only)) { pool = cp; goto out; } else pool = NULL; for (i = 0; i < total_pools; i++) { struct pool *tp = pools[i]; if (tp->quota_used < tp->quota_gcd) { avail = true; break; } } /* There are no pools with quota, so reset them. */ if (!avail) { for (i = 0; i < total_pools; i++) pools[i]->quota_used = 0; if (++rotating_pool >= total_pools) rotating_pool = 0; } /* Try to find the first pool in the rotation that is usable */ tested = 0; while (!pool && tested++ < total_pools) { pool = pools[rotating_pool]; if (pool->quota_used++ < pool->quota_gcd) { if (!pool_unworkable(pool)) break; /* Failover-only flag for load-balance means distribute * unused quota to priority pool 0. */ if (opt_fail_only) priority_pool(0)->quota_used--; } pool = NULL; if (++rotating_pool >= total_pools) rotating_pool = 0; } /* If there are no alive pools with quota, choose according to * priority. 
*/ if (!pool) { for (i = 0; i < total_pools; i++) { struct pool *tp = priority_pool(i); if (!pool_unusable(tp)) { pool = tp; break; } } } /* If still nothing is usable, use the current pool */ if (!pool) pool = cp; out: if (cp != pool) { if (!pool_active(pool, false)) { pool_died(pool); goto retry; } pool_tclear(pool, &pool->idle); } applog(LOG_DEBUG, "Selecting pool %d for work", pool->pool_no); return pool; } static double DIFFEXACTONE = 26959946667150639794667015087019630673637144422540572481103610249215.0; static const uint64_t diffone = 0xFFFF000000000000ull; static double target_diff(const unsigned char *target) { double targ = 0; signed int i; for (i = 31; i >= 0; --i) targ = (targ * 0x100) + target[i]; return DIFFEXACTONE / (targ ?: 1); } /* * Calculate the work share difficulty */ static void calc_diff(struct work *work, int known) { struct cgminer_pool_stats *pool_stats = &(work->pool->cgminer_pool_stats); double difficulty; if (!known) { work->work_difficulty = target_diff(work->target); } else work->work_difficulty = known; difficulty = work->work_difficulty; pool_stats->last_diff = difficulty; suffix_string((uint64_t)difficulty, work->pool->diff, sizeof(work->pool->diff), 0); if (difficulty == pool_stats->min_diff) pool_stats->min_diff_count++; else if (difficulty < pool_stats->min_diff || pool_stats->min_diff == 0) { pool_stats->min_diff = difficulty; pool_stats->min_diff_count = 1; } if (difficulty == pool_stats->max_diff) pool_stats->max_diff_count++; else if (difficulty > pool_stats->max_diff) { pool_stats->max_diff = difficulty; pool_stats->max_diff_count = 1; } } static void get_benchmark_work(struct work *work) { // Use a random work block pulled from a pool static uint8_t bench_block[] = { CGMINER_BENCHMARK_BLOCK }; size_t bench_size = sizeof(*work); size_t work_size = sizeof(bench_block); size_t min_size = (work_size < bench_size ? 
work_size : bench_size); memset(work, 0, sizeof(*work)); memcpy(work, &bench_block, min_size); work->mandatory = true; work->pool = pools[0]; cgtime(&work->tv_getwork); copy_time(&work->tv_getwork_reply, &work->tv_getwork); work->getwork_mode = GETWORK_MODE_BENCHMARK; calc_diff(work, 0); } static void wake_gws(void); static void update_last_work(struct work *work) { if (!work->tmpl) // Only save GBT jobs, since rollntime isn't coordinated well yet return; struct pool *pool = work->pool; mutex_lock(&pool->last_work_lock); if (pool->last_work_copy) free_work(pool->last_work_copy); pool->last_work_copy = copy_work(work); pool->last_work_copy->work_restart_id = pool->work_restart_id; mutex_unlock(&pool->last_work_lock); } static void gbt_req_target(json_t *req) { json_t *j; json_t *n; if (!request_target_str) return; j = json_object_get(req, "params"); if (!j) { n = json_array(); if (!n) return; if (json_object_set_new(req, "params", n)) goto erradd; j = n; } n = json_array_get(j, 0); if (!n) { n = json_object(); if (!n) return; if (json_array_append_new(j, n)) goto erradd; } j = n; n = json_string(request_target_str); if (!n) return; if (json_object_set_new(j, "target", n)) goto erradd; return; erradd: json_decref(n); } static char *prepare_rpc_req2(struct work *work, enum pool_protocol proto, const char *lpid, bool probe) { char *rpc_req; clean_work(work); switch (proto) { case PLP_GETWORK: work->getwork_mode = GETWORK_MODE_POOL; return strdup(getwork_req); case PLP_GETBLOCKTEMPLATE: work->getwork_mode = GETWORK_MODE_GBT; work->tmpl_refcount = malloc(sizeof(*work->tmpl_refcount)); if (!work->tmpl_refcount) return NULL; work->tmpl = blktmpl_create(); if (!work->tmpl) goto gbtfail2; *work->tmpl_refcount = 1; gbt_capabilities_t caps = blktmpl_addcaps(work->tmpl); if (!caps) goto gbtfail; caps |= GBT_LONGPOLL; #if BLKMAKER_VERSION > 1 if (opt_coinbase_script.sz) caps |= GBT_CBVALUE; #endif json_t *req = blktmpl_request_jansson(caps, lpid); if (!req) goto gbtfail; if (probe) gbt_req_target(req); rpc_req = json_dumps(req, 0); if (!rpc_req) goto gbtfail; json_decref(req); return rpc_req; default: return NULL; } return NULL; gbtfail: blktmpl_free(work->tmpl); work->tmpl = NULL; gbtfail2: free(work->tmpl_refcount); work->tmpl_refcount = NULL; return NULL; } #define prepare_rpc_req(work, proto, lpid) prepare_rpc_req2(work, proto, lpid, false) #define prepare_rpc_req_probe(work, proto, lpid) prepare_rpc_req2(work, proto, lpid, true) static const char *pool_protocol_name(enum pool_protocol proto) { switch (proto) { case PLP_GETBLOCKTEMPLATE: return "getblocktemplate"; case PLP_GETWORK: return "getwork"; default: return "UNKNOWN"; } } static enum pool_protocol pool_protocol_fallback(enum pool_protocol proto) { switch (proto) { case PLP_GETBLOCKTEMPLATE: if (want_getwork) return PLP_GETWORK; default: return PLP_NONE; } } static bool get_upstream_work(struct work *work, CURL *curl) { struct pool *pool = work->pool; struct cgminer_pool_stats *pool_stats = &(pool->cgminer_pool_stats); struct timeval tv_elapsed; json_t *val = NULL; bool rc = false; char *url; enum pool_protocol proto; char *rpc_req; if (pool->proto == PLP_NONE) pool->proto = PLP_GETBLOCKTEMPLATE; tryagain: rpc_req = prepare_rpc_req(work, pool->proto, NULL); work->pool = pool; if (!rpc_req) return false; applog(LOG_DEBUG, "DBG: sending %s get RPC call: %s", pool->rpc_url, rpc_req); url = pool->rpc_url; cgtime(&work->tv_getwork); val = json_rpc_call(curl, url, pool->rpc_userpass, rpc_req, false, false, &work->rolltime, pool, false); 
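	/* Account for this request in the pool statistics below: getwork_wait_rolling
	 * keeps an exponentially-weighted average of the round-trip time (each new
	 * sample is added with weight 0.63 and the sum divided by 1.63), while the
	 * all-time min/max waits and the call/attempt counters are updated alongside. */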
pool_stats->getwork_attempts++; free(rpc_req); if (likely(val)) { rc = work_decode(pool, work, val); if (unlikely(!rc)) applog(LOG_DEBUG, "Failed to decode work in get_upstream_work"); } else if (PLP_NONE != (proto = pool_protocol_fallback(pool->proto))) { applog(LOG_WARNING, "Pool %u failed getblocktemplate request; falling back to getwork protocol", pool->pool_no); pool->proto = proto; goto tryagain; } else applog(LOG_DEBUG, "Failed json_rpc_call in get_upstream_work"); cgtime(&work->tv_getwork_reply); timersub(&(work->tv_getwork_reply), &(work->tv_getwork), &tv_elapsed); pool_stats->getwork_wait_rolling += ((double)tv_elapsed.tv_sec + ((double)tv_elapsed.tv_usec / 1000000)) * 0.63; pool_stats->getwork_wait_rolling /= 1.63; timeradd(&tv_elapsed, &(pool_stats->getwork_wait), &(pool_stats->getwork_wait)); if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_max), >)) { pool_stats->getwork_wait_max.tv_sec = tv_elapsed.tv_sec; pool_stats->getwork_wait_max.tv_usec = tv_elapsed.tv_usec; } if (timercmp(&tv_elapsed, &(pool_stats->getwork_wait_min), <)) { pool_stats->getwork_wait_min.tv_sec = tv_elapsed.tv_sec; pool_stats->getwork_wait_min.tv_usec = tv_elapsed.tv_usec; } pool_stats->getwork_calls++; work->pool = pool; work->longpoll = false; calc_diff(work, 0); total_getworks++; pool->getwork_requested++; if (rc) update_last_work(work); if (likely(val)) json_decref(val); return rc; } #ifdef HAVE_CURSES static void disable_curses(void) { if (curses_active_locked()) { use_curses = false; curses_active = false; leaveok(logwin, false); leaveok(statuswin, false); leaveok(mainwin, false); nocbreak(); echo(); delwin(logwin); delwin(statuswin); delwin(mainwin); endwin(); #ifdef WIN32 // Move the cursor to after curses output. HANDLE hout = GetStdHandle(STD_OUTPUT_HANDLE); CONSOLE_SCREEN_BUFFER_INFO csbi; COORD coord; if (GetConsoleScreenBufferInfo(hout, &csbi)) { coord.X = 0; coord.Y = csbi.dwSize.Y - 1; SetConsoleCursorPosition(hout, coord); } #endif unlock_curses(); } } #endif static void __kill_work(void) { struct cgpu_info *cgpu; struct thr_info *thr; int i; if (!successful_connect) return; applog(LOG_INFO, "Received kill message"); shutting_down = true; applog(LOG_DEBUG, "Prompting submit_work thread to finish"); notifier_wake(submit_waiting_notifier); #ifdef USE_LIBMICROHTTPD httpsrv_stop(); #endif applog(LOG_DEBUG, "Killing off watchpool thread"); /* Kill the watchpool thread */ thr = &control_thr[watchpool_thr_id]; thr_info_cancel(thr); applog(LOG_DEBUG, "Killing off watchdog thread"); /* Kill the watchdog thread */ thr = &control_thr[watchdog_thr_id]; thr_info_cancel(thr); applog(LOG_DEBUG, "Shutting down mining threads"); for (i = 0; i < mining_threads; i++) { thr = get_thread(i); if (!thr) continue; cgpu = thr->cgpu; if (!cgpu) continue; if (!cgpu->threads) continue; cgpu->shutdown = true; thr->work_restart = true; notifier_wake(thr->notifier); notifier_wake(thr->work_restart_notifier); } sleep(1); applog(LOG_DEBUG, "Killing off mining threads"); /* Kill the mining threads*/ for (i = 0; i < mining_threads; i++) { thr = get_thread(i); if (!thr) continue; cgpu = thr->cgpu; if (cgpu->threads) { applog(LOG_WARNING, "Killing %"PRIpreprv, thr->cgpu->proc_repr); thr_info_cancel(thr); } cgpu->status = LIFE_DEAD2; } /* Stop the others */ applog(LOG_DEBUG, "Killing off API thread"); thr = &control_thr[api_thr_id]; thr_info_cancel(thr); } /* This should be the common exit path */ void kill_work(void) { __kill_work(); quit(0, "Shutdown signal received."); } static #ifdef WIN32 #ifndef _WIN64 const #endif 
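/* initial_args records the miner's original command line (presumably captured
 * in main()) so that app_restart() below can execv() the same binary with the
 * same arguments. */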
#endif char **initial_args; void _bfg_clean_up(bool); void app_restart(void) { applog(LOG_WARNING, "Attempting to restart %s", packagename); __kill_work(); _bfg_clean_up(true); #if defined(unix) || defined(__APPLE__) if (forkpid > 0) { kill(forkpid, SIGTERM); forkpid = 0; } #endif execv(initial_args[0], initial_args); applog(LOG_WARNING, "Failed to restart application"); } static void sighandler(int __maybe_unused sig) { /* Restore signal handlers so we can still quit if kill_work fails */ sigaction(SIGTERM, &termhandler, NULL); sigaction(SIGINT, &inthandler, NULL); kill_work(); } static void start_longpoll(void); static void stop_longpoll(void); /* Called with pool_lock held. Recruit an extra curl if none are available for * this pool. */ static void recruit_curl(struct pool *pool) { struct curl_ent *ce = calloc(sizeof(struct curl_ent), 1); if (unlikely(!ce)) quit(1, "Failed to calloc in recruit_curl"); ce->curl = curl_easy_init(); if (unlikely(!ce->curl)) quit(1, "Failed to init in recruit_curl"); LL_PREPEND(pool->curllist, ce); pool->curls++; } /* Grab an available curl if there is one. If not, then recruit extra curls * unless we are in a submit_fail situation, or we have opt_delaynet enabled * and there are already 5 curls in circulation. Limit total number to the * number of mining threads per pool as well to prevent blasting a pool during * network delays/outages. */ static struct curl_ent *pop_curl_entry3(struct pool *pool, int blocking) { int curl_limit = opt_delaynet ? 5 : (mining_threads + opt_queue) * 2; bool recruited = false; struct curl_ent *ce; mutex_lock(&pool->pool_lock); retry: if (!pool->curls) { recruit_curl(pool); recruited = true; } else if (!pool->curllist) { if (blocking < 2 && pool->curls >= curl_limit && (blocking || pool->curls >= opt_submit_threads)) { if (!blocking) { mutex_unlock(&pool->pool_lock); return NULL; } pthread_cond_wait(&pool->cr_cond, &pool->pool_lock); goto retry; } else { recruit_curl(pool); recruited = true; } } ce = pool->curllist; LL_DELETE(pool->curllist, ce); mutex_unlock(&pool->pool_lock); if (recruited) applog(LOG_DEBUG, "Recruited curl for pool %d", pool->pool_no); return ce; } static struct curl_ent *pop_curl_entry2(struct pool *pool, bool blocking) { return pop_curl_entry3(pool, blocking ? 1 : 0); } __maybe_unused static struct curl_ent *pop_curl_entry(struct pool *pool) { return pop_curl_entry3(pool, 1); } static void push_curl_entry(struct curl_ent *ce, struct pool *pool) { mutex_lock(&pool->pool_lock); if (!ce || !ce->curl) quithere(1, "Attempted to add NULL"); LL_PREPEND(pool->curllist, ce); cgtime(&ce->tv); pthread_cond_broadcast(&pool->cr_cond); mutex_unlock(&pool->pool_lock); } bool stale_work(struct work *work, bool share); static inline bool should_roll(struct work *work) { struct timeval now; time_t expiry; if (work->pool != current_pool() && pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) return false; if (stale_work(work, false)) return false; if (work->rolltime > opt_scantime) expiry = work->rolltime; else expiry = opt_scantime; expiry = expiry * 2 / 3; /* We shouldn't roll if we're unlikely to get one shares' duration * work out of doing so */ cgtime(&now); if (now.tv_sec - work->tv_staged.tv_sec > expiry) return false; return true; } /* Limit rolls to 7000 to not beyond 2 hours in the future where bitcoind will * reject blocks as invalid. 
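 * Each roll of getwork-style work bumps ntime by one second, and bitcoind
 * rejects blocks timestamped more than 7200 seconds ahead of its adjusted
 * time, so stopping at 7000 leaves a safety margin.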
*/ static inline bool can_roll(struct work *work) { if (work->stratum) return false; if (!(work->pool && !work->clone)) return false; if (work->tmpl) { if (stale_work(work, false)) return false; return blkmk_work_left(work->tmpl); } return (work->rolltime && work->rolls < 7000 && !stale_work(work, false)); } static void roll_work(struct work *work) { if (work->tmpl) { struct timeval tv_now; cgtime(&tv_now); if (blkmk_get_data(work->tmpl, work->data, 80, tv_now.tv_sec, NULL, &work->dataid) < 76) applog(LOG_ERR, "Failed to get next data from template; spinning wheels!"); swap32yes(work->data, work->data, 80 / 4); calc_midstate(work); applog(LOG_DEBUG, "Successfully rolled extranonce to dataid %u", work->dataid); } else { uint32_t *work_ntime; uint32_t ntime; work_ntime = (uint32_t *)(work->data + 68); ntime = be32toh(*work_ntime); ntime++; *work_ntime = htobe32(ntime); applog(LOG_DEBUG, "Successfully rolled time header in work"); } local_work++; work->rolls++; work->blk.nonce = 0; /* This is now a different work item so it needs a different ID for the * hashtable */ work->id = total_work++; } /* Duplicates any dynamically allocated arrays within the work struct to * prevent a copied work struct from freeing ram belonging to another struct */ static void _copy_work(struct work *work, const struct work *base_work, int noffset) { int id = work->id; clean_work(work); memcpy(work, base_work, sizeof(struct work)); /* Keep the unique new id assigned during make_work to prevent copied * work from having the same id. */ work->id = id; if (base_work->job_id) work->job_id = strdup(base_work->job_id); if (base_work->nonce1) work->nonce1 = strdup(base_work->nonce1); bytes_cpy(&work->nonce2, &base_work->nonce2); if (base_work->tmpl) { struct pool *pool = work->pool; mutex_lock(&pool->pool_lock); ++*work->tmpl_refcount; mutex_unlock(&pool->pool_lock); } if (noffset) { uint32_t *work_ntime = (uint32_t *)(work->data + 68); uint32_t ntime = be32toh(*work_ntime); ntime += noffset; *work_ntime = htobe32(ntime); } } /* Generates a copy of an existing work struct, creating fresh heap allocations * for all dynamically allocated arrays within the struct */ struct work *copy_work(const struct work *base_work) { struct work *work = make_work(); _copy_work(work, base_work, 0); return work; } void __copy_work(struct work *work, const struct work *base_work) { _copy_work(work, base_work, 0); } static struct work *make_clone(struct work *work) { struct work *work_clone = copy_work(work); work_clone->clone = true; cgtime((struct timeval *)&(work_clone->tv_cloned)); work_clone->longpoll = false; work_clone->mandatory = false; /* Make cloned work appear slightly older to bias towards keeping the * master work item which can be further rolled */ work_clone->tv_staged.tv_sec -= 1; return work_clone; } static void stage_work(struct work *work); static bool clone_available(void) { struct work *work_clone = NULL, *work, *tmp; bool cloned = false; mutex_lock(stgd_lock); if (!staged_rollable) goto out_unlock; HASH_ITER(hh, staged_work, work, tmp) { if (can_roll(work) && should_roll(work)) { roll_work(work); work_clone = make_clone(work); applog(LOG_DEBUG, "%s: Rolling work %d to %d", __func__, work->id, work_clone->id); roll_work(work); cloned = true; break; } } out_unlock: mutex_unlock(stgd_lock); if (cloned) { applog(LOG_DEBUG, "Pushing cloned available work to stage thread"); stage_work(work_clone); } return cloned; } static void pool_died(struct pool *pool) { if (!pool_tset(pool, &pool->idle)) { cgtime(&pool->tv_idle); if 
(pool == current_pool()) { applog(LOG_WARNING, "Pool %d %s not responding!", pool->pool_no, pool->rpc_url); switch_pools(NULL); } else applog(LOG_INFO, "Pool %d %s failed to return work", pool->pool_no, pool->rpc_url); } } bool stale_work(struct work *work, bool share) { unsigned work_expiry; struct pool *pool; uint32_t block_id; unsigned getwork_delay; if (opt_benchmark) return false; block_id = ((uint32_t*)work->data)[1]; pool = work->pool; /* Technically the rolltime should be correct but some pools * advertise a broken expire= that is lower than a meaningful * scantime */ if (work->rolltime >= opt_scantime || work->tmpl) work_expiry = work->rolltime; else work_expiry = opt_expiry; unsigned max_expiry = (have_longpoll ? opt_expiry_lp : opt_expiry); if (work_expiry > max_expiry) work_expiry = max_expiry; if (share) { /* If the share isn't on this pool's latest block, it's stale */ if (pool->block_id && pool->block_id != block_id) { applog(LOG_DEBUG, "Share stale due to block mismatch (%08lx != %08lx)", (long)block_id, (long)pool->block_id); return true; } /* If the pool doesn't want old shares, then any found in work before * the most recent longpoll is stale */ if ((!pool->submit_old) && work->work_restart_id != pool->work_restart_id) { applog(LOG_DEBUG, "Share stale due to mandatory work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id); return true; } } else { /* If this work isn't for the latest Bitcoin block, it's stale */ /* But only care about the current pool if failover-only */ if (enabled_pools <= 1 || opt_fail_only) { if (pool->block_id && block_id != pool->block_id) { applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 1 ? %08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)current_block_id); return true; } } else { if (block_id != current_block_id) { applog(LOG_DEBUG, "Work stale due to block mismatch (%08lx != 0 ? 
%08lx : %08lx)", (long)block_id, (long)pool->block_id, (long)current_block_id); return true; } } /* If the pool has asked us to restart since this work, it's stale */ if (work->work_restart_id != pool->work_restart_id) { applog(LOG_DEBUG, "Work stale due to work update (%02x != %02x)", work->work_restart_id, pool->work_restart_id); return true; } if (pool->has_stratum && work->job_id) { bool same_job; if (!pool->stratum_active || !pool->stratum_notify) { applog(LOG_DEBUG, "Work stale due to stratum inactive"); return true; } same_job = true; cg_rlock(&pool->data_lock); if (strcmp(work->job_id, pool->swork.job_id)) same_job = false; cg_runlock(&pool->data_lock); if (!same_job) { applog(LOG_DEBUG, "Work stale due to stratum job_id mismatch"); return true; } } /* Factor in the average getwork delay of this pool, rounding it up to * the nearest second */ getwork_delay = pool->cgminer_pool_stats.getwork_wait_rolling * 5 + 1; if (unlikely(work_expiry <= getwork_delay + 5)) work_expiry = 5; else work_expiry -= getwork_delay; } int elapsed_since_staged = timer_elapsed(&work->tv_staged, NULL); if (elapsed_since_staged > work_expiry) { applog(LOG_DEBUG, "%s stale due to expiry (%d >= %u)", share?"Share":"Work", elapsed_since_staged, work_expiry); return true; } /* If the user only wants strict failover, any work from a pool other than * the current one is always considered stale */ if (opt_fail_only && !share && pool != current_pool() && !work->mandatory && pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) { applog(LOG_DEBUG, "Work stale due to fail only pool mismatch (pool %u vs %u)", pool->pool_no, current_pool()->pool_no); return true; } return false; } static uint64_t share_diff(const struct work *work) { uint64_t ret; bool new_best = false; ret = target_diff(work->hash); cg_wlock(&control_lock); if (unlikely(ret > best_diff)) { new_best = true; best_diff = ret; suffix_string(best_diff, best_share, sizeof(best_share), 0); } if (unlikely(ret > work->pool->best_diff)) work->pool->best_diff = ret; cg_wunlock(&control_lock); if (unlikely(new_best)) applog(LOG_INFO, "New best share: %s", best_share); return ret; } static void regen_hash(struct work *work) { hash_data(work->hash, work->data); } static void rebuild_hash(struct work *work) { if (opt_scrypt) scrypt_regenhash(work); else regen_hash(work); work->share_diff = share_diff(work); if (unlikely(work->share_diff >= current_diff)) { work->block = true; work->pool->solved++; found_blocks++; work->mandatory = true; applog(LOG_NOTICE, "Found block for pool %d!", work->pool->pool_no); } } static void submit_discard_share2(const char *reason, struct work *work) { struct cgpu_info *cgpu = get_thr_cgpu(work->thr_id); sharelog(reason, work); mutex_lock(&stats_lock); ++total_stale; ++cgpu->stale; ++(work->pool->stale_shares); total_diff_stale += work->work_difficulty; cgpu->diff_stale += work->work_difficulty; work->pool->diff_stale += work->work_difficulty; mutex_unlock(&stats_lock); } static void submit_discard_share(struct work *work) { submit_discard_share2("discard", work); } struct submit_work_state { struct work *work; bool resubmit; struct curl_ent *ce; int failures; struct timeval tv_staleexpire; char *s; struct timeval tv_submit; struct submit_work_state *next; }; static int my_curl_timer_set(__maybe_unused CURLM *curlm, long timeout_ms, void *userp) { long *p_timeout_us = userp; const long max_ms = LONG_MAX / 1000; if (max_ms < timeout_ms) timeout_ms = max_ms; *p_timeout_us = timeout_ms * 1000; return 0; } static void 
sws_has_ce(struct submit_work_state *sws) { struct pool *pool = sws->work->pool; sws->s = submit_upstream_work_request(sws->work); cgtime(&sws->tv_submit); json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws); } static struct submit_work_state *begin_submission(struct work *work) { struct pool *pool; struct submit_work_state *sws = NULL; pool = work->pool; sws = malloc(sizeof(*sws)); *sws = (struct submit_work_state){ .work = work, }; rebuild_hash(work); if (stale_work(work, true)) { work->stale = true; if (opt_submit_stale) applog(LOG_NOTICE, "Pool %d stale share detected, submitting as user requested", pool->pool_no); else if (pool->submit_old) applog(LOG_NOTICE, "Pool %d stale share detected, submitting as pool requested", pool->pool_no); else { applog(LOG_NOTICE, "Pool %d stale share detected, discarding", pool->pool_no); submit_discard_share(work); goto out; } timer_set_delay_from_now(&sws->tv_staleexpire, 300000000); } if (work->stratum) { char *s; s = malloc(1024); sws->s = s; } else { /* submit solution to bitcoin via JSON-RPC */ sws->ce = pop_curl_entry2(pool, false); if (sws->ce) { sws_has_ce(sws); } else { sws->next = pool->sws_waiting_on_curl; pool->sws_waiting_on_curl = sws; if (sws->next) applog(LOG_DEBUG, "submit_thread queuing submission"); else applog(LOG_WARNING, "submit_thread queuing submissions (see --submit-threads)"); } } return sws; out: free(sws); return NULL; } static bool retry_submission(struct submit_work_state *sws) { struct work *work = sws->work; struct pool *pool = work->pool; sws->resubmit = true; if ((!work->stale) && stale_work(work, true)) { work->stale = true; if (opt_submit_stale) applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as user requested", pool->pool_no); else if (pool->submit_old) applog(LOG_NOTICE, "Pool %d share became stale during submission failure, will retry as pool requested", pool->pool_no); else { applog(LOG_NOTICE, "Pool %d share became stale during submission failure, discarding", pool->pool_no); submit_discard_share(work); return false; } timer_set_delay_from_now(&sws->tv_staleexpire, 300000000); } if (unlikely((opt_retries >= 0) && (++sws->failures > opt_retries))) { applog(LOG_ERR, "Pool %d failed %d submission retries, discarding", pool->pool_no, opt_retries); submit_discard_share(work); return false; } else if (work->stale) { if (unlikely(opt_retries < 0 && timer_passed(&sws->tv_staleexpire, NULL))) { applog(LOG_NOTICE, "Pool %d stale share failed to submit for 5 minutes, discarding", pool->pool_no); submit_discard_share(work); return false; } } /* pause, then restart work-request loop */ applog(LOG_INFO, "json_rpc_call failed on submit_work, retrying"); cgtime(&sws->tv_submit); json_rpc_call_async(sws->ce->curl, pool->rpc_url, pool->rpc_userpass, sws->s, false, pool, true, sws); return true; } static void free_sws(struct submit_work_state *sws) { free(sws->s); free_work(sws->work); free(sws); } static void *submit_work_thread(__maybe_unused void *userdata) { int wip = 0; CURLM *curlm; long curlm_timeout_us = -1; struct timeval curlm_timer; struct submit_work_state *sws, **swsp; struct submit_work_state *write_sws = NULL; unsigned tsreduce = 0; pthread_detach(pthread_self()); RenameThread("submit_work"); applog(LOG_DEBUG, "Creating extra submit work thread"); curlm = curl_multi_init(); curlm_timeout_us = -1; curl_multi_setopt(curlm, CURLMOPT_TIMERDATA, &curlm_timeout_us); curl_multi_setopt(curlm, CURLMOPT_TIMERFUNCTION, my_curl_timer_set); 
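	/* Main submission loop: multiplex three event sources with select() -- the
	 * curl multi handle for getwork/GBT HTTP submissions, the sockets of pools
	 * with queued stratum submissions (write_sws), and the submit_waiting
	 * notifier that signals newly queued shares. */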
fd_set rfds, wfds, efds; int maxfd; struct timeval tv_timeout, tv_now; int n; CURLMsg *cm; FD_ZERO(&rfds); while (1) { mutex_lock(&submitting_lock); total_submitting -= tsreduce; tsreduce = 0; if (FD_ISSET(submit_waiting_notifier[0], &rfds)) { notifier_read(submit_waiting_notifier); } // Receive any new submissions while (submit_waiting) { struct work *work = submit_waiting; DL_DELETE(submit_waiting, work); if ( (sws = begin_submission(work)) ) { if (sws->ce) curl_multi_add_handle(curlm, sws->ce->curl); else if (sws->s) { sws->next = write_sws; write_sws = sws; } ++wip; } else { --total_submitting; free_work(work); } } if (unlikely(shutting_down && !wip)) break; mutex_unlock(&submitting_lock); FD_ZERO(&rfds); FD_ZERO(&wfds); FD_ZERO(&efds); tv_timeout.tv_sec = -1; // Setup cURL with select // Need to call perform to ensure the timeout gets updated curl_multi_perform(curlm, &n); curl_multi_fdset(curlm, &rfds, &wfds, &efds, &maxfd); if (curlm_timeout_us >= 0) { timer_set_delay_from_now(&curlm_timer, curlm_timeout_us); reduce_timeout_to(&tv_timeout, &curlm_timer); } // Setup waiting stratum submissions with select for (sws = write_sws; sws; sws = sws->next) { struct pool *pool = sws->work->pool; int fd = pool->sock; if (fd == INVSOCK || (!pool->stratum_init) || !pool->stratum_notify) continue; FD_SET(fd, &wfds); set_maxfd(&maxfd, fd); } // Setup "submit waiting" notifier with select FD_SET(submit_waiting_notifier[0], &rfds); set_maxfd(&maxfd, submit_waiting_notifier[0]); // Wait for something interesting to happen :) cgtime(&tv_now); if (select(maxfd+1, &rfds, &wfds, &efds, select_timeout(&tv_timeout, &tv_now)) < 0) { FD_ZERO(&rfds); continue; } // Handle any stratum ready-to-write results for (swsp = &write_sws; (sws = *swsp); ) { struct work *work = sws->work; struct pool *pool = work->pool; int fd = pool->sock; bool sessionid_match; if (fd == INVSOCK || (!pool->stratum_init) || (!pool->stratum_notify) || !FD_ISSET(fd, &wfds)) { next_write_sws: // TODO: Check if stale, possibly discard etc swsp = &sws->next; continue; } cg_rlock(&pool->data_lock); // NOTE: cgminer only does this check on retries, but BFGMiner does it for even the first/normal submit; therefore, it needs to be such that it always is true on the same connection regardless of session management // NOTE: Worst case scenario for a false positive: the pool rejects it as H-not-zero sessionid_match = (!pool->nonce1) || !strcmp(work->nonce1, pool->nonce1); cg_runlock(&pool->data_lock); if (!sessionid_match) { applog(LOG_DEBUG, "No matching session id for resubmitting stratum share"); submit_discard_share2("disconnect", work); ++tsreduce; next_write_sws_del: // Clear the fd from wfds, to avoid potentially blocking on other submissions to the same socket FD_CLR(fd, &wfds); // Delete sws for this submission, since we're done with it *swsp = sws->next; free_sws(sws); --wip; continue; } char *s = sws->s; struct stratum_share *sshare = calloc(sizeof(struct stratum_share), 1); int sshare_id; uint32_t nonce; char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1]; char noncehex[9]; char ntimehex[9]; sshare->work = copy_work(work); bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2)); nonce = *((uint32_t *)(work->data + 76)); bin2hex(noncehex, (const unsigned char *)&nonce, 4); bin2hex(ntimehex, (void *)&work->data[68], 4); mutex_lock(&sshare_lock); /* Give the stratum share a unique id */ sshare_id = sshare->id = swork_id++; HASH_ADD_INT(stratum_shares, id, sshare); snprintf(s, 1024, "{\"params\": [\"%s\", \"%s\", \"%s\", 
\"%s\", \"%s\"], \"id\": %d, \"method\": \"mining.submit\"}", pool->rpc_user, work->job_id, nonce2hex, ntimehex, noncehex, sshare->id); mutex_unlock(&sshare_lock); applog(LOG_DEBUG, "DBG: sending %s submit RPC call: %s", pool->stratum_url, s); if (likely(stratum_send(pool, s, strlen(s)))) { if (pool_tclear(pool, &pool->submit_fail)) applog(LOG_WARNING, "Pool %d communication resumed, submitting work", pool->pool_no); applog(LOG_DEBUG, "Successfully submitted, adding to stratum_shares db"); goto next_write_sws_del; } else if (!pool_tset(pool, &pool->submit_fail)) { // Undo stuff mutex_lock(&sshare_lock); // NOTE: Need to find it again in case something else has consumed it already (like the stratum-disconnect resubmitter...) HASH_FIND_INT(stratum_shares, &sshare_id, sshare); if (sshare) HASH_DEL(stratum_shares, sshare); mutex_unlock(&sshare_lock); if (sshare) { free_work(sshare->work); free(sshare); } applog(LOG_WARNING, "Pool %d stratum share submission failure", pool->pool_no); total_ro++; pool->remotefail_occasions++; if (!sshare) goto next_write_sws_del; goto next_write_sws; } } // Handle any cURL activities curl_multi_perform(curlm, &n); while( (cm = curl_multi_info_read(curlm, &n)) ) { if (cm->msg == CURLMSG_DONE) { bool finished; json_t *val = json_rpc_call_completed(cm->easy_handle, cm->data.result, false, NULL, &sws); curl_multi_remove_handle(curlm, cm->easy_handle); finished = submit_upstream_work_completed(sws->work, sws->resubmit, &sws->tv_submit, val); if (!finished) { if (retry_submission(sws)) curl_multi_add_handle(curlm, sws->ce->curl); else finished = true; } if (finished) { --wip; ++tsreduce; struct pool *pool = sws->work->pool; if (pool->sws_waiting_on_curl) { pool->sws_waiting_on_curl->ce = sws->ce; sws_has_ce(pool->sws_waiting_on_curl); pool->sws_waiting_on_curl = pool->sws_waiting_on_curl->next; curl_multi_add_handle(curlm, sws->ce->curl); } else { push_curl_entry(sws->ce, sws->work->pool); } free_sws(sws); } } } } assert(!write_sws); mutex_unlock(&submitting_lock); curl_multi_cleanup(curlm); applog(LOG_DEBUG, "submit_work thread exiting"); return NULL; } /* Find the pool that currently has the highest priority */ static struct pool *priority_pool(int choice) { struct pool *ret = NULL; int i; for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (pool->prio == choice) { ret = pool; break; } } if (unlikely(!ret)) { applog(LOG_ERR, "WTF No pool %d found!", choice); return pools[choice]; } return ret; } int prioritize_pools(char *param, int *pid) { char *ptr, *next; int i, pr, prio = 0; if (total_pools == 0) { return MSG_NOPOOL; } if (param == NULL || *param == '\0') { return MSG_MISPID; } bool pools_changed[total_pools]; int new_prio[total_pools]; for (i = 0; i < total_pools; ++i) pools_changed[i] = false; next = param; while (next && *next) { ptr = next; next = strchr(ptr, ','); if (next) *(next++) = '\0'; i = atoi(ptr); if (i < 0 || i >= total_pools) { *pid = i; return MSG_INVPID; } if (pools_changed[i]) { *pid = i; return MSG_DUPPID; } pools_changed[i] = true; new_prio[i] = prio++; } // Only change them if no errors for (i = 0; i < total_pools; i++) { if (pools_changed[i]) pools[i]->prio = new_prio[i]; } // In priority order, cycle through the unchanged pools and append them for (pr = 0; pr < total_pools; pr++) for (i = 0; i < total_pools; i++) { if (!pools_changed[i] && pools[i]->prio == pr) { pools[i]->prio = prio++; pools_changed[i] = true; break; } } if (current_pool()->prio) switch_pools(NULL); return MSG_POOLPRIO; } void 
validate_pool_priorities(void) { // TODO: this should probably do some sort of logging int i, j; bool used[total_pools]; bool valid[total_pools]; for (i = 0; i < total_pools; i++) used[i] = valid[i] = false; for (i = 0; i < total_pools; i++) { if (pools[i]->prio >=0 && pools[i]->prio < total_pools) { if (!used[pools[i]->prio]) { valid[i] = true; used[pools[i]->prio] = true; } } } for (i = 0; i < total_pools; i++) { if (!valid[i]) { for (j = 0; j < total_pools; j++) { if (!used[j]) { applog(LOG_WARNING, "Pool %d priority changed from %d to %d", i, pools[i]->prio, j); pools[i]->prio = j; used[j] = true; break; } } } } } static void clear_pool_work(struct pool *pool); /* Specifies whether we can switch to this pool or not. */ static bool pool_unusable(struct pool *pool) { if (pool->idle) return true; if (pool->enabled != POOL_ENABLED) return true; return false; } void switch_pools(struct pool *selected) { struct pool *pool, *last_pool; int i, pool_no, next_pool; cg_wlock(&control_lock); last_pool = currentpool; pool_no = currentpool->pool_no; /* Switch selected to pool number 0 and move the rest down */ if (selected) { if (selected->prio != 0) { for (i = 0; i < total_pools; i++) { pool = pools[i]; if (pool->prio < selected->prio) pool->prio++; } selected->prio = 0; } } switch (pool_strategy) { /* All of these set to the master pool */ case POOL_BALANCE: case POOL_FAILOVER: case POOL_LOADBALANCE: for (i = 0; i < total_pools; i++) { pool = priority_pool(i); if (pool_unusable(pool)) continue; pool_no = pool->pool_no; break; } break; /* Both of these simply increment and cycle */ case POOL_ROUNDROBIN: case POOL_ROTATE: if (selected && !selected->idle) { pool_no = selected->pool_no; break; } next_pool = pool_no; /* Select the next alive pool */ for (i = 1; i < total_pools; i++) { next_pool++; if (next_pool >= total_pools) next_pool = 0; pool = pools[next_pool]; if (pool_unusable(pool)) continue; pool_no = next_pool; break; } break; default: break; } currentpool = pools[pool_no]; pool = currentpool; cg_wunlock(&control_lock); /* Set the lagging flag to avoid pool not providing work fast enough * messages in failover only mode since we have to get all fresh work * as in restart_threads */ if (opt_fail_only) pool_tset(pool, &pool->lagging); if (pool != last_pool) { pool->block_id = 0; if (pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE) { applog(LOG_WARNING, "Switching to pool %d %s", pool->pool_no, pool->rpc_url); if (pool_localgen(pool) || opt_fail_only) clear_pool_work(last_pool); } } mutex_lock(&lp_lock); pthread_cond_broadcast(&lp_cond); mutex_unlock(&lp_lock); } static void discard_work(struct work *work) { if (!work->clone && !work->rolls && !work->mined) { if (work->pool) { work->pool->discarded_work++; work->pool->quota_used--; work->pool->works--; } total_discarded++; applog(LOG_DEBUG, "Discarded work"); } else applog(LOG_DEBUG, "Discarded cloned or rolled work"); free_work(work); } static void wake_gws(void) { mutex_lock(stgd_lock); pthread_cond_signal(&gws_cond); mutex_unlock(stgd_lock); } static void discard_stale(void) { struct work *work, *tmp; int stale = 0; mutex_lock(stgd_lock); HASH_ITER(hh, staged_work, work, tmp) { if (stale_work(work, false)) { HASH_DEL(staged_work, work); discard_work(work); stale++; staged_full = false; } } pthread_cond_signal(&gws_cond); mutex_unlock(stgd_lock); if (stale) applog(LOG_DEBUG, "Discarded %d stales that didn't match current hash", stale); } bool stale_work_future(struct work *work, bool share, unsigned long ustime) { bool rv; 
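	/* Temporarily backdate tv_staged by ustime microseconds so that stale_work()
	 * effectively answers "would this work be stale that far in the future?",
	 * then restore the original staging time before returning. */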
struct timeval tv, orig; ldiv_t d; d = ldiv(ustime, 1000000); tv = (struct timeval){ .tv_sec = d.quot, .tv_usec = d.rem, }; orig = work->tv_staged; timersub(&orig, &tv, &work->tv_staged); rv = stale_work(work, share); work->tv_staged = orig; return rv; } static void restart_threads(void) { struct pool *cp = current_pool(); int i; struct thr_info *thr; /* Artificially set the lagging flag to avoid pool not providing work * fast enough messages after every long poll */ pool_tset(cp, &cp->lagging); /* Discard staged work that is now stale */ discard_stale(); rd_lock(&mining_thr_lock); for (i = 0; i < mining_threads; i++) { thr = mining_thr[i]; thr->work_restart = true; } for (i = 0; i < mining_threads; i++) { thr = mining_thr[i]; notifier_wake(thr->work_restart_notifier); } rd_unlock(&mining_thr_lock); } static void blkhashstr(char *rv, const unsigned char *hash) { unsigned char hash_swap[32]; swap256(hash_swap, hash); swap32tole(hash_swap, hash_swap, 32 / 4); bin2hex(rv, hash_swap, 32); } static void set_curblock(char *hexstr, unsigned char *hash) { unsigned char hash_swap[32]; current_block_id = ((uint32_t*)hash)[0]; strcpy(current_block, hexstr); swap256(hash_swap, hash); swap32tole(hash_swap, hash_swap, 32 / 4); cg_wlock(&ch_lock); block_time = time(NULL); __update_block_title(hash_swap); free(current_fullhash); current_fullhash = malloc(65); bin2hex(current_fullhash, hash_swap, 32); get_timestamp(blocktime, sizeof(blocktime), block_time); cg_wunlock(&ch_lock); applog(LOG_INFO, "New block: %s diff %s (%s)", current_hash, block_diff, net_hashrate); } /* Search to see if this string is from a block that has been seen before */ static bool block_exists(char *hexstr) { struct block *s; rd_lock(&blk_lock); HASH_FIND_STR(blocks, hexstr, s); rd_unlock(&blk_lock); if (s) return true; return false; } /* Tests if this work is from a block that has been seen before */ static inline bool from_existing_block(struct work *work) { char hexstr[37]; bool ret; bin2hex(hexstr, work->data + 8, 18); ret = block_exists(hexstr); return ret; } static int block_sort(struct block *blocka, struct block *blockb) { return blocka->block_no - blockb->block_no; } static void set_blockdiff(const struct work *work) { unsigned char target[32]; double diff; uint64_t diff64; real_block_target(target, work->data); diff = target_diff(target); diff64 = diff; suffix_string(diff64, block_diff, sizeof(block_diff), 0); format_unit2(net_hashrate, sizeof(net_hashrate), true, "h/s", H2B_SHORT, diff * 7158278, -1); if (unlikely(current_diff != diff)) applog(LOG_NOTICE, "Network difficulty changed to %s (%s)", block_diff, net_hashrate); current_diff = diff; } static bool test_work_current(struct work *work) { bool ret = true; char hexstr[65]; if (work->mandatory) return ret; uint32_t block_id = ((uint32_t*)(work->data))[1]; /* Hack to work around dud work sneaking into test */ bin2hex(hexstr, work->data + 8, 18); if (!strncmp(hexstr, "000000000000000000000000000000000000", 36)) goto out_free; /* Search to see if this block exists yet and if not, consider it a * new block and set the current block details to this one */ if (!block_exists(hexstr)) { struct block *s = calloc(sizeof(struct block), 1); int deleted_block = 0; ret = false; if (unlikely(!s)) quit (1, "test_work_current OOM"); strcpy(s->hash, hexstr); s->block_no = new_blocks++; wr_lock(&blk_lock); /* Only keep the last hour's worth of blocks in memory since * work from blocks before this is virtually impossible and we * want to prevent memory usage from continually rising */ if 
(HASH_COUNT(blocks) > 6) { struct block *oldblock; HASH_SORT(blocks, block_sort); oldblock = blocks; deleted_block = oldblock->block_no; HASH_DEL(blocks, oldblock); free(oldblock); } HASH_ADD_STR(blocks, hash, s); set_blockdiff(work); wr_unlock(&blk_lock); work->pool->block_id = block_id; if (deleted_block) applog(LOG_DEBUG, "Deleted block %d from database", deleted_block); #if BLKMAKER_VERSION > 1 template_nonce = 0; #endif set_curblock(hexstr, &work->data[4]); if (unlikely(new_blocks == 1)) goto out_free; if (!work->stratum) { if (work->longpoll) { applog(LOG_NOTICE, "Longpoll from pool %d detected new block", work->pool->pool_no); } else if (have_longpoll) applog(LOG_NOTICE, "New block detected on network before longpoll"); else applog(LOG_NOTICE, "New block detected on network"); } restart_threads(); } else { bool restart = false; struct pool *curpool = NULL; if (unlikely(work->pool->block_id != block_id)) { bool was_active = work->pool->block_id != 0; work->pool->block_id = block_id; if (!work->longpoll) update_last_work(work); if (was_active) { // Pool actively changed block if (work->pool == (curpool = current_pool())) restart = true; if (block_id == current_block_id) { // Caught up, only announce if this pool is the one in use if (restart) applog(LOG_NOTICE, "%s %d caught up to new block", work->longpoll ? "Longpoll from pool" : "Pool", work->pool->pool_no); } else { // Switched to a block we know, but not the latest... why? // This might detect pools trying to double-spend or 51%, // but let's not make any accusations until it's had time // in the real world. blkhashstr(hexstr, &work->data[4]); applog(LOG_WARNING, "%s %d is issuing work for an old block: %s", work->longpoll ? "Longpoll from pool" : "Pool", work->pool->pool_no, hexstr); } } } if (work->longpoll) { ++work->pool->work_restart_id; update_last_work(work); if ((!restart) && work->pool == current_pool()) { applog( (opt_quiet_work_updates ? LOG_DEBUG : LOG_NOTICE), "Longpoll from pool %d requested work update", work->pool->pool_no); restart = true; } } if (restart) restart_threads(); } work->longpoll = false; out_free: return ret; } static int tv_sort(struct work *worka, struct work *workb) { return worka->tv_staged.tv_sec - workb->tv_staged.tv_sec; } static bool work_rollable(struct work *work) { return (!work->clone && work->rolltime); } static bool hash_push(struct work *work) { bool rc = true; mutex_lock(stgd_lock); if (work_rollable(work)) staged_rollable++; if (likely(!getq->frozen)) { HASH_ADD_INT(staged_work, id, work); HASH_SORT(staged_work, tv_sort); } else rc = false; pthread_cond_broadcast(&getq->cond); mutex_unlock(stgd_lock); return rc; } static void stage_work(struct work *work) { applog(LOG_DEBUG, "Pushing work %d from pool %d to hash queue", work->id, work->pool->pool_no); work->work_restart_id = work->pool->work_restart_id; work->pool->last_work_time = time(NULL); cgtime(&work->pool->tv_last_work_time); test_work_current(work); work->pool->works++; hash_push(work); } #ifdef HAVE_CURSES int curses_int(const char *query) { int ret; char *cvar; cvar = curses_input(query); if (unlikely(!cvar)) return -1; ret = atoi(cvar); free(cvar); return ret; } #endif #ifdef HAVE_CURSES static bool input_pool(bool live); #endif #ifdef HAVE_CURSES static void display_pool_summary(struct pool *pool) { double efficiency = 0.0; char xfer[17], bw[19]; int pool_secs; if (curses_active_locked()) { wlog("Pool: %s\n", pool->rpc_url); if (pool->solved) wlog("SOLVED %d BLOCK%s!\n", pool->solved, pool->solved > 1 ? 
"S" : ""); if (!pool->has_stratum) wlog("%s own long-poll support\n", pool->lp_url ? "Has" : "Does not have"); wlog(" Queued work requests: %d\n", pool->getwork_requested); wlog(" Share submissions: %d\n", pool->accepted + pool->rejected); wlog(" Accepted shares: %d\n", pool->accepted); wlog(" Rejected shares: %d + %d stale (%.2f%%)\n", pool->rejected, pool->stale_shares, (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted) ); wlog(" Accepted difficulty shares: %1.f\n", pool->diff_accepted); wlog(" Rejected difficulty shares: %1.f\n", pool->diff_rejected); pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL); wlog(" Network transfer: %s (%s)\n", multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2, (float)pool->cgminer_pool_stats.net_bytes_received, (float)pool->cgminer_pool_stats.net_bytes_sent), multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2, (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs), (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs))); uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent; efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. / pool_bytes_xfer : 0.0; wlog(" Efficiency (accepted * difficulty / 2 KB): %.2f\n", efficiency); wlog(" Items worked on: %d\n", pool->works); wlog(" Stale submissions discarded due to new blocks: %d\n", pool->stale_shares); wlog(" Unable to get work from server occasions: %d\n", pool->getfail_occasions); wlog(" Submitting work remotely delay occasions: %d\n\n", pool->remotefail_occasions); unlock_curses(); } } #endif /* We can't remove the memory used for this struct pool because there may * still be work referencing it. 
We just remove it from the pools list */ void remove_pool(struct pool *pool) { int i, last_pool = total_pools - 1; struct pool *other; /* Boost priority of any lower prio than this one */ for (i = 0; i < total_pools; i++) { other = pools[i]; if (other->prio > pool->prio) other->prio--; } if (pool->pool_no < last_pool) { /* Swap the last pool for this one */ (pools[last_pool])->pool_no = pool->pool_no; pools[pool->pool_no] = pools[last_pool]; } /* Give it an invalid number */ pool->pool_no = total_pools; pool->removed = true; pool->has_stratum = false; total_pools--; } /* add a mutex if this needs to be thread safe in the future */ static struct JE { char *buf; struct JE *next; } *jedata = NULL; static void json_escape_free() { struct JE *jeptr = jedata; struct JE *jenext; jedata = NULL; while (jeptr) { jenext = jeptr->next; free(jeptr->buf); free(jeptr); jeptr = jenext; } } static char *json_escape(const char *str) { struct JE *jeptr; char *buf, *ptr; /* 2x is the max, may as well just allocate that */ ptr = buf = malloc(strlen(str) * 2 + 1); jeptr = malloc(sizeof(*jeptr)); jeptr->buf = buf; jeptr->next = jedata; jedata = jeptr; while (*str) { if (*str == '\\' || *str == '"') *(ptr++) = '\\'; *(ptr++) = *(str++); } *ptr = '\0'; return buf; } void _write_config_temps(FILE *fcfg, const char *configname, size_t settingoffset, size_t defoffset) { int i, commas; int *setp, allset; uint8_t *defp; for (i = 0; ; ++i) { if (i == total_devices) // All defaults return; setp = ((void*)devices[i]) + settingoffset; defp = ((void*)devices[i]) + defoffset; allset = *setp; if (*setp != *defp) break; } fprintf(fcfg, ",\n\"%s\" : \"", configname); for (i = 1; ; ++i) { if (i == total_devices) { // All the same fprintf(fcfg, "%d\"", allset); return; } setp = ((void*)devices[i]) + settingoffset; if (allset != *setp) break; } commas = 0; for (i = 0; i < total_devices; ++i) { setp = ((void*)devices[i]) + settingoffset; defp = ((void*)devices[i]) + defoffset; if (*setp != *defp) { for ( ; commas; --commas) fputs(",", fcfg); fprintf(fcfg, "%d", *setp); } ++commas; } fputs("\"", fcfg); } #define write_config_temps(fcfg, configname, settingname) \ _write_config_temps(fcfg, configname, offsetof(struct cgpu_info, settingname), offsetof(struct cgpu_info, settingname ## _default)) static void _write_config_string_elist(FILE *fcfg, const char *configname, struct string_elist * const elist) { if (!elist) return; static struct string_elist *entry; fprintf(fcfg, ",\n\"%s\" : [", configname); bool first = true; DL_FOREACH(elist, entry) { const char * const s = entry->string; fprintf(fcfg, "%s\n\t\"%s\"", first ? "" : ",", json_escape(s)); first = false; } fprintf(fcfg, "\n]"); } void write_config(FILE *fcfg) { int i; /* Write pool values */ fputs("{\n\"pools\" : [", fcfg); for(i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (pool->quota != 1) { fprintf(fcfg, "%s\n\t{\n\t\t\"quota\" : \"%d;%s\",", i > 0 ? "," : "", pool->quota, json_escape(pool->rpc_url)); } else { fprintf(fcfg, "%s\n\t{\n\t\t\"url\" : \"%s\",", i > 0 ? 
"," : "", json_escape(pool->rpc_url)); } if (pool->rpc_proxy) fprintf(fcfg, "\n\t\t\"pool-proxy\" : \"%s\",", json_escape(pool->rpc_proxy)); fprintf(fcfg, "\n\t\t\"user\" : \"%s\",", json_escape(pool->rpc_user)); fprintf(fcfg, "\n\t\t\"pass\" : \"%s\",", json_escape(pool->rpc_pass)); fprintf(fcfg, "\n\t\t\"pool-priority\" : \"%d\"", pool->prio); if (pool->force_rollntime) fprintf(fcfg, ",\n\t\t\"force-rollntime\" : %d", pool->force_rollntime); fprintf(fcfg, "\n\t}"); } fputs("\n]\n", fcfg); write_config_temps(fcfg, "temp-cutoff", cutofftemp); write_config_temps(fcfg, "temp-target", targettemp); #ifdef HAVE_OPENCL if (nDevs) { /* Write GPU device values */ fputs(",\n\"intensity\" : \"", fcfg); for(i = 0; i < nDevs; i++) { if (i > 0) fputc(',', fcfg); if (gpus[i].dynamic) fputc('d', fcfg); else fprintf(fcfg, "%d", gpus[i].intensity); } fputs("\",\n\"vectors\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", gpus[i].vwidth); fputs("\",\n\"worksize\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", (int)gpus[i].work_size); fputs("\",\n\"kernel\" : \"", fcfg); for(i = 0; i < nDevs; i++) { fprintf(fcfg, "%s", i > 0 ? "," : ""); switch (gpus[i].kernel) { case KL_NONE: // Shouldn't happen break; case KL_POCLBM: fprintf(fcfg, "poclbm"); break; case KL_PHATK: fprintf(fcfg, "phatk"); break; case KL_DIAKGCN: fprintf(fcfg, "diakgcn"); break; case KL_DIABLO: fprintf(fcfg, "diablo"); break; case KL_SCRYPT: fprintf(fcfg, "scrypt"); break; } } #ifdef USE_SCRYPT fputs("\",\n\"lookup-gap\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", (int)gpus[i].opt_lg); fputs("\",\n\"thread-concurrency\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", (int)gpus[i].opt_tc); fputs("\",\n\"shaders\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", (int)gpus[i].shaders); #endif #ifdef HAVE_ADL fputs("\",\n\"gpu-engine\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d-%d", i > 0 ? "," : "", gpus[i].min_engine, gpus[i].gpu_engine); fputs("\",\n\"gpu-fan\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d-%d", i > 0 ? "," : "", gpus[i].min_fan, gpus[i].gpu_fan); fputs("\",\n\"gpu-memclock\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", gpus[i].gpu_memclock); fputs("\",\n\"gpu-memdiff\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", gpus[i].gpu_memdiff); fputs("\",\n\"gpu-powertune\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? "," : "", gpus[i].gpu_powertune); fputs("\",\n\"gpu-vddc\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%1.3f", i > 0 ? "," : "", gpus[i].gpu_vddc); fputs("\",\n\"temp-overheat\" : \"", fcfg); for(i = 0; i < nDevs; i++) fprintf(fcfg, "%s%d", i > 0 ? 
"," : "", gpus[i].adl.overtemp); #endif fputs("\"", fcfg); } #endif #ifdef HAVE_ADL if (opt_reorder) fprintf(fcfg, ",\n\"gpu-reorder\" : true"); #endif #ifdef WANT_CPUMINE fprintf(fcfg, ",\n\"algo\" : \"%s\"", algo_names[opt_algo]); #endif /* Simple bool and int options */ struct opt_table *opt; for (opt = opt_config_table; opt->type != OPT_END; opt++) { char *p, *name = strdup(opt->names); for (p = strtok(name, "|"); p; p = strtok(NULL, "|")) { if (p[1] != '-') continue; if (opt->type & OPT_NOARG && ((void *)opt->cb == (void *)opt_set_bool || (void *)opt->cb == (void *)opt_set_invbool) && (*(bool *)opt->u.arg == ((void *)opt->cb == (void *)opt_set_bool))) fprintf(fcfg, ",\n\"%s\" : true", p+2); if (opt->type & OPT_HASARG && ((void *)opt->cb_arg == (void *)set_int_0_to_9999 || (void *)opt->cb_arg == (void *)set_int_1_to_65535 || (void *)opt->cb_arg == (void *)set_int_0_to_10 || (void *)opt->cb_arg == (void *)set_int_1_to_10) && opt->desc != opt_hidden && 0 <= *(int *)opt->u.arg) fprintf(fcfg, ",\n\"%s\" : \"%d\"", p+2, *(int *)opt->u.arg); } } /* Special case options */ if (request_target_str) { if (request_pdiff == (long)request_pdiff) fprintf(fcfg, ",\n\"request-diff\" : %ld", (long)request_pdiff); else fprintf(fcfg, ",\n\"request-diff\" : %f", request_pdiff); } fprintf(fcfg, ",\n\"shares\" : \"%d\"", opt_shares); if (pool_strategy == POOL_BALANCE) fputs(",\n\"balance\" : true", fcfg); if (pool_strategy == POOL_LOADBALANCE) fputs(",\n\"load-balance\" : true", fcfg); if (pool_strategy == POOL_ROUNDROBIN) fputs(",\n\"round-robin\" : true", fcfg); if (pool_strategy == POOL_ROTATE) fprintf(fcfg, ",\n\"rotate\" : \"%d\"", opt_rotate_period); #if defined(unix) || defined(__APPLE__) if (opt_stderr_cmd && *opt_stderr_cmd) fprintf(fcfg, ",\n\"monitor\" : \"%s\"", json_escape(opt_stderr_cmd)); #endif // defined(unix) if (opt_kernel_path && *opt_kernel_path) { char *kpath = strdup(opt_kernel_path); if (kpath[strlen(kpath)-1] == '/') kpath[strlen(kpath)-1] = 0; fprintf(fcfg, ",\n\"kernel-path\" : \"%s\"", json_escape(kpath)); free(kpath); } if (schedstart.enable) fprintf(fcfg, ",\n\"sched-time\" : \"%d:%d\"", schedstart.tm.tm_hour, schedstart.tm.tm_min); if (schedstop.enable) fprintf(fcfg, ",\n\"stop-time\" : \"%d:%d\"", schedstop.tm.tm_hour, schedstop.tm.tm_min); if (opt_socks_proxy && *opt_socks_proxy) fprintf(fcfg, ",\n\"socks-proxy\" : \"%s\"", json_escape(opt_socks_proxy)); _write_config_string_elist(fcfg, "scan", scan_devices); #ifdef USE_LIBMICROHTTPD if (httpsrv_port != -1) fprintf(fcfg, ",\n\"http-port\" : %d", httpsrv_port); #endif #ifdef USE_LIBEVENT if (stratumsrv_port != -1) fprintf(fcfg, ",\n\"stratum-port\" : %d", stratumsrv_port); #endif _write_config_string_elist(fcfg, "device", opt_devices_enabled_list); _write_config_string_elist(fcfg, "set-device", opt_set_device_list); if (opt_api_allow) fprintf(fcfg, ",\n\"api-allow\" : \"%s\"", json_escape(opt_api_allow)); if (strcmp(opt_api_mcast_addr, API_MCAST_ADDR) != 0) fprintf(fcfg, ",\n\"api-mcast-addr\" : \"%s\"", json_escape(opt_api_mcast_addr)); if (strcmp(opt_api_mcast_code, API_MCAST_CODE) != 0) fprintf(fcfg, ",\n\"api-mcast-code\" : \"%s\"", json_escape(opt_api_mcast_code)); if (*opt_api_mcast_des) fprintf(fcfg, ",\n\"api-mcast-des\" : \"%s\"", json_escape(opt_api_mcast_des)); if (strcmp(opt_api_description, PACKAGE_STRING) != 0) fprintf(fcfg, ",\n\"api-description\" : \"%s\"", json_escape(opt_api_description)); if (opt_api_groups) fprintf(fcfg, ",\n\"api-groups\" : \"%s\"", json_escape(opt_api_groups)); if (opt_icarus_options) 
fprintf(fcfg, ",\n\"icarus-options\" : \"%s\"", json_escape(opt_icarus_options)); if (opt_icarus_timing) fprintf(fcfg, ",\n\"icarus-timing\" : \"%s\"", json_escape(opt_icarus_timing)); #ifdef USE_KLONDIKE if (opt_klondike_options) fprintf(fcfg, ",\n\"klondike-options\" : \"%s\"", json_escape(opt_klondike_options)); #endif fputs("\n}\n", fcfg); json_escape_free(); } void zero_bestshare(void) { int i; best_diff = 0; memset(best_share, 0, 8); suffix_string(best_diff, best_share, sizeof(best_share), 0); for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; pool->best_diff = 0; } } void zero_stats(void) { int i; applog(LOG_DEBUG, "Zeroing stats"); cgtime(&total_tv_start); miner_started = total_tv_start; total_rolling = 0; total_mhashes_done = 0; total_getworks = 0; total_accepted = 0; total_rejected = 0; hw_errors = 0; total_stale = 0; total_discarded = 0; total_bytes_rcvd = total_bytes_sent = 0; new_blocks = 0; local_work = 0; total_go = 0; total_ro = 0; total_secs = 1.0; total_diff1 = 0; total_bad_nonces = 0; found_blocks = 0; total_diff_accepted = 0; total_diff_rejected = 0; total_diff_stale = 0; #ifdef HAVE_CURSES awidth = rwidth = swidth = hwwidth = 1; #endif for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; pool->getwork_requested = 0; pool->accepted = 0; pool->rejected = 0; pool->solved = 0; pool->getwork_requested = 0; pool->stale_shares = 0; pool->discarded_work = 0; pool->getfail_occasions = 0; pool->remotefail_occasions = 0; pool->last_share_time = 0; pool->diff1 = 0; pool->diff_accepted = 0; pool->diff_rejected = 0; pool->diff_stale = 0; pool->last_share_diff = 0; pool->cgminer_stats.start_tv = total_tv_start; pool->cgminer_stats.getwork_calls = 0; pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; pool->cgminer_stats.getwork_wait_max.tv_sec = 0; pool->cgminer_stats.getwork_wait_max.tv_usec = 0; pool->cgminer_pool_stats.getwork_calls = 0; pool->cgminer_pool_stats.getwork_attempts = 0; pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; pool->cgminer_pool_stats.getwork_wait_max.tv_sec = 0; pool->cgminer_pool_stats.getwork_wait_max.tv_usec = 0; pool->cgminer_pool_stats.min_diff = 0; pool->cgminer_pool_stats.max_diff = 0; pool->cgminer_pool_stats.min_diff_count = 0; pool->cgminer_pool_stats.max_diff_count = 0; pool->cgminer_pool_stats.times_sent = 0; pool->cgminer_pool_stats.bytes_sent = 0; pool->cgminer_pool_stats.net_bytes_sent = 0; pool->cgminer_pool_stats.times_received = 0; pool->cgminer_pool_stats.bytes_received = 0; pool->cgminer_pool_stats.net_bytes_received = 0; } zero_bestshare(); for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = get_devices(i); mutex_lock(&hash_lock); cgpu->total_mhashes = 0; cgpu->accepted = 0; cgpu->rejected = 0; cgpu->stale = 0; cgpu->hw_errors = 0; cgpu->utility = 0.0; cgpu->utility_diff1 = 0; cgpu->last_share_pool_time = 0; cgpu->bad_nonces = 0; cgpu->diff1 = 0; cgpu->diff_accepted = 0; cgpu->diff_rejected = 0; cgpu->diff_stale = 0; cgpu->last_share_diff = 0; cgpu->thread_fail_init_count = 0; cgpu->thread_zero_hash_count = 0; cgpu->thread_fail_queue_count = 0; cgpu->dev_sick_idle_60_count = 0; cgpu->dev_dead_idle_600_count = 0; cgpu->dev_nostart_count = 0; cgpu->dev_over_heat_count = 0; cgpu->dev_thermal_cutoff_count = 0; cgpu->dev_comms_error_count = 0; cgpu->dev_throttle_count = 0; cgpu->cgminer_stats.start_tv = total_tv_start; cgpu->cgminer_stats.getwork_calls = 0; cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; cgpu->cgminer_stats.getwork_wait_max.tv_sec = 0; 
cgpu->cgminer_stats.getwork_wait_max.tv_usec = 0; mutex_unlock(&hash_lock); } } #ifdef HAVE_CURSES static void loginput_mode(const int size) { clear_logwin(); loginput_size = size; check_winsizes(); } static void display_pools(void) { struct pool *pool; int selected, i, j; char input; loginput_mode(7 + total_pools); immedok(logwin, true); updated: for (j = 0; j < total_pools; j++) { for (i = 0; i < total_pools; i++) { pool = pools[i]; if (pool->prio != j) continue; if (pool == current_pool()) wattron(logwin, A_BOLD); if (pool->enabled != POOL_ENABLED) wattron(logwin, A_DIM); wlogprint("%d: ", pool->prio); switch (pool->enabled) { case POOL_ENABLED: wlogprint("Enabled "); break; case POOL_DISABLED: wlogprint("Disabled "); break; case POOL_REJECTING: wlogprint("Rejectin "); break; } if (pool->idle) wlogprint("Dead "); else if (pool->has_stratum) wlogprint("Strtm"); else if (pool->lp_url && pool->proto != pool->lp_proto) wlogprint("Mixed"); else switch (pool->proto) { case PLP_GETBLOCKTEMPLATE: wlogprint(" GBT "); break; case PLP_GETWORK: wlogprint("GWork"); break; default: wlogprint("Alive"); } wlogprint(" Quota %d Pool %d: %s User:%s\n", pool->quota, pool->pool_no, pool->rpc_url, pool->rpc_user); wattroff(logwin, A_BOLD | A_DIM); break; //for (i = 0; i < total_pools; i++) } } retry: wlogprint("\nCurrent pool management strategy: %s\n", strategies[pool_strategy].s); if (pool_strategy == POOL_ROTATE) wlogprint("Set to rotate every %d minutes\n", opt_rotate_period); wlogprint("[F]ailover only %s\n", opt_fail_only ? "enabled" : "disabled"); wlogprint("Pool [A]dd [R]emove [D]isable [E]nable [P]rioritize [Q]uota change\n"); wlogprint("[C]hange management strategy [S]witch pool [I]nformation\n"); wlogprint("Or press any other key to continue\n"); logwin_update(); input = getch(); if (!strncasecmp(&input, "a", 1)) { input_pool(true); goto updated; } else if (!strncasecmp(&input, "r", 1)) { if (total_pools <= 1) { wlogprint("Cannot remove last pool"); goto retry; } selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; if (pool == current_pool()) switch_pools(NULL); if (pool == current_pool()) { wlogprint("Unable to remove pool due to activity\n"); goto retry; } disable_pool(pool); remove_pool(pool); goto updated; } else if (!strncasecmp(&input, "s", 1)) { selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; enable_pool(pool); switch_pools(pool); goto updated; } else if (!strncasecmp(&input, "d", 1)) { if (enabled_pools <= 1) { wlogprint("Cannot disable last pool"); goto retry; } selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; disable_pool(pool); if (pool == current_pool()) switch_pools(NULL); goto updated; } else if (!strncasecmp(&input, "e", 1)) { selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; enable_pool(pool); if (pool->prio < current_pool()->prio) switch_pools(pool); goto updated; } else if (!strncasecmp(&input, "c", 1)) { for (i = 0; i <= TOP_STRATEGY; i++) wlogprint("%d: %s\n", i, strategies[i].s); selected = curses_int("Select strategy number type"); if (selected < 0 || selected > TOP_STRATEGY) { wlogprint("Invalid selection\n"); goto retry; } if 
(selected == POOL_ROTATE) { opt_rotate_period = curses_int("Select interval in minutes"); if (opt_rotate_period < 0 || opt_rotate_period > 9999) { opt_rotate_period = 0; wlogprint("Invalid selection\n"); goto retry; } } pool_strategy = selected; switch_pools(NULL); goto updated; } else if (!strncasecmp(&input, "i", 1)) { selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; display_pool_summary(pool); goto retry; } else if (!strncasecmp(&input, "q", 1)) { selected = curses_int("Select pool number"); if (selected < 0 || selected >= total_pools) { wlogprint("Invalid selection\n"); goto retry; } pool = pools[selected]; selected = curses_int("Set quota"); if (selected < 0) { wlogprint("Invalid negative quota\n"); goto retry; } pool->quota = selected; adjust_quota_gcd(); goto updated; } else if (!strncasecmp(&input, "f", 1)) { opt_fail_only ^= true; goto updated; } else if (!strncasecmp(&input, "p", 1)) { char *prilist = curses_input("Enter new pool priority (comma separated list)"); if (!prilist) { wlogprint("Not changing priorities\n"); goto retry; } int res = prioritize_pools(prilist, &i); free(prilist); switch (res) { case MSG_NOPOOL: wlogprint("No pools\n"); goto retry; case MSG_MISPID: wlogprint("Missing pool id parameter\n"); goto retry; case MSG_INVPID: wlogprint("Invalid pool id %d - range is 0 - %d\n", i, total_pools - 1); goto retry; case MSG_DUPPID: wlogprint("Duplicate pool specified %d\n", i); goto retry; case MSG_POOLPRIO: default: goto updated; } } immedok(logwin, false); loginput_mode(0); } static const char *summary_detail_level_str(void) { if (opt_compact) return "compact"; if (opt_show_procs) return "processors"; return "devices"; } static void display_options(void) { int selected; char input; immedok(logwin, true); loginput_mode(12); retry: clear_logwin(); wlogprint("[N]ormal [C]lear [S]ilent mode (disable all output)\n"); wlogprint("[D]ebug:%s\n[P]er-device:%s\n[Q]uiet:%s\n[V]erbose:%s\n" "[R]PC debug:%s\n[W]orkTime details:%s\nsu[M]mary detail level:%s\n" "[L]og interval:%d\nS[T]atistical counts: %s\n[Z]ero statistics\n", opt_debug_console ? "on" : "off", want_per_device_stats? "on" : "off", opt_quiet ? "on" : "off", opt_log_output ? "on" : "off", opt_protocol ? "on" : "off", opt_worktime ? "on" : "off", summary_detail_level_str(), opt_log_interval, opt_weighed_stats ? "weighed" : "absolute"); wlogprint("Select an option or any other key to return\n"); logwin_update(); input = getch(); if (!strncasecmp(&input, "q", 1)) { opt_quiet ^= true; wlogprint("Quiet mode %s\n", opt_quiet ? "enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "v", 1)) { opt_log_output ^= true; if (opt_log_output) opt_quiet = false; wlogprint("Verbose mode %s\n", opt_log_output ? "enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "n", 1)) { opt_log_output = false; opt_debug_console = false; opt_quiet = false; opt_protocol = false; opt_compact = false; opt_show_procs = false; devsummaryYOffset = 0; want_per_device_stats = false; wlogprint("Output mode reset to normal\n"); switch_logsize(); goto retry; } else if (!strncasecmp(&input, "d", 1)) { opt_debug = true; opt_debug_console ^= true; opt_log_output = opt_debug_console; if (opt_debug_console) opt_quiet = false; wlogprint("Debug mode %s\n", opt_debug_console ? 
"enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "m", 1)) { if (opt_compact) opt_compact = false; else if (!opt_show_procs) opt_show_procs = true; else { opt_compact = true; opt_show_procs = false; devsummaryYOffset = 0; } wlogprint("su[M]mary detail level changed to: %s\n", summary_detail_level_str()); switch_logsize(); goto retry; } else if (!strncasecmp(&input, "p", 1)) { want_per_device_stats ^= true; opt_log_output = want_per_device_stats; wlogprint("Per-device stats %s\n", want_per_device_stats ? "enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "r", 1)) { opt_protocol ^= true; if (opt_protocol) opt_quiet = false; wlogprint("RPC protocol debugging %s\n", opt_protocol ? "enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "c", 1)) clear_logwin(); else if (!strncasecmp(&input, "l", 1)) { selected = curses_int("Interval in seconds"); if (selected < 0 || selected > 9999) { wlogprint("Invalid selection\n"); goto retry; } opt_log_interval = selected; wlogprint("Log interval set to %d seconds\n", opt_log_interval); goto retry; } else if (!strncasecmp(&input, "s", 1)) { opt_realquiet = true; } else if (!strncasecmp(&input, "w", 1)) { opt_worktime ^= true; wlogprint("WorkTime details %s\n", opt_worktime ? "enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "t", 1)) { opt_weighed_stats ^= true; wlogprint("Now displaying %s statistics\n", opt_weighed_stats ? "weighed" : "absolute"); goto retry; } else if (!strncasecmp(&input, "z", 1)) { zero_stats(); goto retry; } immedok(logwin, false); loginput_mode(0); } #endif void default_save_file(char *filename) { #if defined(unix) || defined(__APPLE__) if (getenv("HOME") && *getenv("HOME")) { strcpy(filename, getenv("HOME")); strcat(filename, "/"); } else strcpy(filename, ""); strcat(filename, ".bfgminer/"); mkdir(filename, 0777); #else strcpy(filename, ""); #endif strcat(filename, def_conf); } #ifdef HAVE_CURSES static void set_options(void) { int selected; char input; immedok(logwin, true); loginput_mode(8); retry: wlogprint("\n[L]ongpoll: %s\n", want_longpoll ? "On" : "Off"); wlogprint("[Q]ueue: %d\n[S]cantime: %d\n[E]xpiry: %d\n[R]etries: %d\n" "[W]rite config file\n[B]FGMiner restart\n", opt_queue, opt_scantime, opt_expiry, opt_retries); wlogprint("Select an option or any other key to return\n"); logwin_update(); input = getch(); if (!strncasecmp(&input, "q", 1)) { selected = curses_int("Extra work items to queue"); if (selected < 0 || selected > 9999) { wlogprint("Invalid selection\n"); goto retry; } opt_queue = selected; goto retry; } else if (!strncasecmp(&input, "l", 1)) { if (want_longpoll) stop_longpoll(); else start_longpoll(); applog(LOG_WARNING, "Longpoll %s", want_longpoll ? 
"enabled" : "disabled"); goto retry; } else if (!strncasecmp(&input, "s", 1)) { selected = curses_int("Set scantime in seconds"); if (selected < 0 || selected > 9999) { wlogprint("Invalid selection\n"); goto retry; } opt_scantime = selected; goto retry; } else if (!strncasecmp(&input, "e", 1)) { selected = curses_int("Set expiry time in seconds"); if (selected < 0 || selected > 9999) { wlogprint("Invalid selection\n"); goto retry; } opt_expiry = selected; goto retry; } else if (!strncasecmp(&input, "r", 1)) { selected = curses_int("Retries before failing (-1 infinite)"); if (selected < -1 || selected > 9999) { wlogprint("Invalid selection\n"); goto retry; } opt_retries = selected; goto retry; } else if (!strncasecmp(&input, "w", 1)) { FILE *fcfg; char *str, filename[PATH_MAX], prompt[PATH_MAX + 50]; default_save_file(filename); snprintf(prompt, sizeof(prompt), "Config filename to write (Enter for default) [%s]", filename); str = curses_input(prompt); if (str) { struct stat statbuf; strcpy(filename, str); free(str); if (!stat(filename, &statbuf)) { wlogprint("File exists, overwrite?\n"); input = getch(); if (strncasecmp(&input, "y", 1)) goto retry; } } fcfg = fopen(filename, "w"); if (!fcfg) { wlogprint("Cannot open or create file\n"); goto retry; } write_config(fcfg); fclose(fcfg); goto retry; } else if (!strncasecmp(&input, "b", 1)) { wlogprint("Are you sure?\n"); input = getch(); if (!strncasecmp(&input, "y", 1)) app_restart(); else clear_logwin(); } else clear_logwin(); loginput_mode(0); immedok(logwin, false); } int scan_serial(const char *); static void _managetui_msg(const char *repr, const char **msg) { if (*msg) { applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": %s", repr, *msg); wattron(logwin, A_BOLD); wlogprint("%s", *msg); wattroff(logwin, A_BOLD); *msg = NULL; } logwin_update(); } void manage_device(void) { char logline[256]; const char *msg = NULL; struct cgpu_info *cgpu; const struct device_drv *drv; selecting_device = true; immedok(logwin, true); loginput_mode(12); devchange: if (unlikely(!total_devices)) { clear_logwin(); wlogprint("(no devices)\n"); wlogprint("[Plus] Add device(s) [Enter] Close device manager\n"); _managetui_msg("(none)", &msg); int input = getch(); switch (input) { case '+': case '=': // add new device goto addnew; default: goto out; } } cgpu = devices[selected_device]; drv = cgpu->drv; refresh_devstatus(); refresh: clear_logwin(); wlogprint("Select processor to manage using up/down arrow keys\n"); get_statline3(logline, sizeof(logline), cgpu, true, true); wattron(logwin, A_BOLD); wlogprint("%s", logline); wattroff(logwin, A_BOLD); wlogprint("\n"); if (cgpu->dev_manufacturer) wlogprint(" %s from %s\n", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer); else if (cgpu->dev_product) wlogprint(" %s\n", cgpu->dev_product); if (cgpu->dev_serial) wlogprint("Serial: %s\n", cgpu->dev_serial); if (cgpu->kname) wlogprint("Kernel: %s\n", cgpu->kname); if (drv->proc_wlogprint_status && likely(cgpu->status != LIFE_INIT)) drv->proc_wlogprint_status(cgpu); wlogprint("\n"); // TODO: Last share at TIMESTAMP on pool N // TODO: Custom device info/commands if (cgpu->deven != DEV_ENABLED) wlogprint("[E]nable "); if (cgpu->deven != DEV_DISABLED) wlogprint("[D]isable "); if (drv->identify_device) wlogprint("[I]dentify "); if (drv->proc_tui_wlogprint_choices && likely(cgpu->status != LIFE_INIT)) drv->proc_tui_wlogprint_choices(cgpu); wlogprint("\n"); wlogprint("[Slash] Find processor [Plus] Add device(s) [Enter] Close device manager\n"); _managetui_msg(cgpu->proc_repr, &msg); 
while (true) { int input = getch(); applog(LOG_DEBUG, "ManageTUI: %"PRIpreprv": (choice %d)", cgpu->proc_repr, input); switch (input) { case 'd': case 'D': if (cgpu->deven == DEV_DISABLED) msg = "Processor already disabled\n"; else { cgpu->deven = DEV_DISABLED; msg = "Processor being disabled\n"; } goto refresh; case 'e': case 'E': if (cgpu->deven == DEV_ENABLED) msg = "Processor already enabled\n"; else { proc_enable(cgpu); msg = "Processor being enabled\n"; } goto refresh; case 'i': case 'I': if (drv->identify_device && drv->identify_device(cgpu)) msg = "Identify command sent\n"; else goto key_default; goto refresh; case KEY_DOWN: if (selected_device >= total_devices - 1) break; ++selected_device; goto devchange; case KEY_UP: if (selected_device <= 0) break; --selected_device; goto devchange; case KEY_NPAGE: { if (selected_device >= total_devices - 1) break; struct cgpu_info *mdev = devices[selected_device]->device; do { ++selected_device; } while (devices[selected_device]->device == mdev && selected_device < total_devices - 1); goto devchange; } case KEY_PPAGE: { if (selected_device <= 0) break; struct cgpu_info *mdev = devices[selected_device]->device; do { --selected_device; } while (devices[selected_device]->device == mdev && selected_device > 0); goto devchange; } case '/': case '?': // find device { static char *pattern = NULL; char *newpattern = curses_input("Enter pattern"); if (newpattern) { free(pattern); pattern = newpattern; } else if (!pattern) pattern = calloc(1, 1); int match = cgpu_search(pattern, selected_device + 1); if (match == -1) { msg = "Couldn't find device\n"; goto refresh; } selected_device = match; goto devchange; } case '+': case '=': // add new device { addnew: clear_logwin(); _wlogprint( "Enter \"auto\", \"all\", or a serial port to probe for mining devices.\n" "Prefix by a driver name and colon to only probe a specific driver.\n" "For example: erupter:" #ifdef WIN32 "\\\\.\\COM40" #elif defined(__APPLE__) "/dev/cu.SLAB_USBtoUART" #else "/dev/ttyUSB39" #endif "\n" ); char *scanser = curses_input("Enter target"); if (scan_serial(scanser)) { selected_device = total_devices - 1; msg = "Device scan succeeded\n"; } else msg = "No new devices found\n"; goto devchange; } case 'Q': case 'q': case KEY_BREAK: case KEY_BACKSPACE: case KEY_CANCEL: case KEY_CLOSE: case KEY_EXIT: case '\x1b': // ESC case KEY_ENTER: case '\r': // Ctrl-M on Windows, with nonl #ifdef PADENTER case PADENTER: // pdcurses, used by Enter key on Windows with nonl #endif case '\n': goto out; default: ; key_default: if (drv->proc_tui_handle_choice && likely(drv_ready(cgpu))) { msg = drv->proc_tui_handle_choice(cgpu, input); if (msg) goto refresh; } } } out: selecting_device = false; loginput_mode(0); immedok(logwin, false); } void show_help(void) { loginput_mode(10); // NOTE: wlogprint is a macro with a buffer limit _wlogprint( "ST: work in queue | F: network fails | NB: new blocks detected\n" "AS: shares being submitted | BW: bandwidth (up/down)\n" "E: # shares * diff per 2kB bw | U: shares/minute | BS: best share ever found\n" U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_BTEE U8_HLINE 
U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE U8_HLINE "\n" "devices/processors hashing (only for totals line), hottest temperature\n" ); wlogprint( "hashrates: %ds decaying / all-time average / all-time average (effective)\n" , opt_log_interval); _wlogprint( "A: accepted shares | R: rejected+discarded(%% of total)\n" "HW: hardware errors / %% nonces invalid\n" "\n" "Press any key to clear" ); logwin_update(); getch(); loginput_mode(0); } static void *input_thread(void __maybe_unused *userdata) { RenameThread("input"); if (!curses_active) return NULL; while (1) { int input; input = getch(); switch (input) { case 'h': case 'H': case '?': case KEY_F(1): show_help(); break; case 'q': case 'Q': kill_work(); return NULL; case 'd': case 'D': display_options(); break; case 'm': case 'M': manage_device(); break; case 'p': case 'P': display_pools(); break; case 's': case 'S': set_options(); break; #ifdef HAVE_CURSES case KEY_DOWN: { const int visible_lines = logcursor - devcursor; const int invisible_lines = total_lines - visible_lines; if (devsummaryYOffset <= -invisible_lines) break; devsummaryYOffset -= 2; } case KEY_UP: if (devsummaryYOffset == 0) break; ++devsummaryYOffset; refresh_devstatus(); break; case KEY_NPAGE: { const int visible_lines = logcursor - devcursor; const int invisible_lines = total_lines - visible_lines; if (devsummaryYOffset - visible_lines <= -invisible_lines) devsummaryYOffset = -invisible_lines; else devsummaryYOffset -= visible_lines; refresh_devstatus(); break; } case KEY_PPAGE: { const int visible_lines = logcursor - devcursor; if (devsummaryYOffset + visible_lines >= 0) devsummaryYOffset = 0; else devsummaryYOffset += visible_lines; refresh_devstatus(); break; } #endif } if (opt_realquiet) { disable_curses(); break; } } return NULL; } #endif static void *api_thread(void *userdata) { struct thr_info *mythr = userdata; pthread_detach(pthread_self()); pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); RenameThread("rpc"); api(api_thr_id); mythr->has_pth = false; return NULL; } void thread_reportin(struct thr_info *thr) { cgtime(&thr->last); thr->cgpu->status = LIFE_WELL; thr->getwork = 0; thr->cgpu->device_last_well = time(NULL); } void thread_reportout(struct thr_info *thr) { thr->getwork = time(NULL); } static void hashmeter(int thr_id, struct timeval *diff, uint64_t hashes_done) { char logstatusline[256]; struct timeval temp_tv_end, total_diff; double secs; double local_secs; static double local_mhashes_done = 0; double local_mhashes = (double)hashes_done / 1000000.0; bool showlog = false; char cHr[h2bs_fmt_size[H2B_NOUNIT]], aHr[h2bs_fmt_size[H2B_NOUNIT]], uHr[h2bs_fmt_size[H2B_SPACED]]; char rejpcbuf[6]; char bnbuf[6]; struct thr_info *thr; /* Update the last time this thread reported in */ if (thr_id >= 0) { thr = get_thread(thr_id); cgtime(&(thr->last)); thr->cgpu->device_last_well = time(NULL); } secs = (double)diff->tv_sec + ((double)diff->tv_usec / 1000000.0); /* So we can call hashmeter from a non worker thread */ if (thr_id >= 0) { struct cgpu_info *cgpu = thr->cgpu; int threadobj = cgpu->threads ?: 1; double thread_rolling = 0.0; int i; applog(LOG_DEBUG, "[thread %d: %"PRIu64" hashes, %.1f khash/sec]", thr_id, hashes_done, hashes_done / 1000 / secs); /* Rolling average for each thread and each device */ decay_time(&thr->rolling, local_mhashes / secs, secs); for (i = 
0; i < threadobj; i++) thread_rolling += cgpu->thr[i]->rolling; mutex_lock(&hash_lock); decay_time(&cgpu->rolling, thread_rolling, secs); cgpu->total_mhashes += local_mhashes; mutex_unlock(&hash_lock); // If needed, output detailed, per-device stats if (want_per_device_stats) { struct timeval now; struct timeval elapsed; struct timeval *last_msg_tv = opt_show_procs ? &thr->cgpu->last_message_tv : &thr->cgpu->device->last_message_tv; cgtime(&now); timersub(&now, last_msg_tv, &elapsed); if (opt_log_interval <= elapsed.tv_sec) { struct cgpu_info *cgpu = thr->cgpu; char logline[255]; *last_msg_tv = now; get_statline(logline, sizeof(logline), cgpu); if (!curses_active) { printf("%s \r", logline); fflush(stdout); } else applog(LOG_INFO, "%s", logline); } } } /* Totals are updated by all threads so can race without locking */ mutex_lock(&hash_lock); cgtime(&temp_tv_end); timersub(&temp_tv_end, &total_tv_end, &total_diff); total_mhashes_done += local_mhashes; local_mhashes_done += local_mhashes; /* Only update with opt_log_interval */ if (total_diff.tv_sec < opt_log_interval) goto out_unlock; showlog = true; cgtime(&total_tv_end); local_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0); decay_time(&total_rolling, local_mhashes_done / local_secs, local_secs); global_hashrate = ((unsigned long long)lround(total_rolling)) * 1000000; timersub(&total_tv_end, &total_tv_start, &total_diff); total_secs = (double)total_diff.tv_sec + ((double)total_diff.tv_usec / 1000000.0); double wtotal = (total_diff_accepted + total_diff_rejected + total_diff_stale); multi_format_unit_array2( ((char*[]){cHr, aHr, uHr}), ((size_t[]){h2bs_fmt_size[H2B_NOUNIT], h2bs_fmt_size[H2B_NOUNIT], h2bs_fmt_size[H2B_SPACED]}), true, "h/s", H2B_SHORT, 3, 1e6*total_rolling, 1e6*total_mhashes_done / total_secs, utility_to_hashrate(total_diff1 * (wtotal ? (total_diff_accepted / wtotal) : 1) * 60 / total_secs)); int ui_accepted, ui_rejected, ui_stale; if (opt_weighed_stats) { ui_accepted = total_diff_accepted; ui_rejected = total_diff_rejected; ui_stale = total_diff_stale; } else { ui_accepted = total_accepted; ui_rejected = total_rejected; ui_stale = total_stale; } #ifdef HAVE_CURSES if (curses_active_locked()) { float temp = 0; struct cgpu_info *proc, *last_working_dev = NULL; int i, working_devs = 0, working_procs = 0; int divx; bool bad = false; // Find the highest temperature of all processors for (i = 0; i < total_devices; ++i) { proc = get_devices(i); if (proc->temp > temp) temp = proc->temp; if (unlikely(proc->deven == DEV_DISABLED)) ; // Just need to block it off from both conditions else if (likely(proc->status == LIFE_WELL && proc->deven == DEV_ENABLED)) { if (proc->rolling > .1) { ++working_procs; if (proc->device != last_working_dev) { ++working_devs; last_working_dev = proc->device; } } } else bad = true; } if (working_devs == working_procs) snprintf(statusline, sizeof(statusline), "%s%d ", bad ? U8_BAD_START : "", working_devs); else snprintf(statusline, sizeof(statusline), "%s%d/%d ", bad ? 
U8_BAD_START : "", working_devs, working_procs); divx = 7; if (opt_show_procs && !opt_compact) ++divx; if (bad) { divx += sizeof(U8_BAD_START)-1; strcpy(&statusline[divx], U8_BAD_END); divx += sizeof(U8_BAD_END)-1; } temperature_column(&statusline[divx], sizeof(statusline)-divx, true, &temp); format_statline(statusline, sizeof(statusline), cHr, aHr, uHr, ui_accepted, ui_rejected, ui_stale, total_diff_rejected + total_diff_stale, total_diff_accepted, hw_errors, total_bad_nonces, total_bad_nonces + total_diff1); unlock_curses(); } #endif // Add a space memmove(&uHr[6], &uHr[5], strlen(&uHr[5]) + 1); uHr[5] = ' '; percentf4(rejpcbuf, sizeof(rejpcbuf), total_diff_rejected + total_diff_stale, total_diff_accepted); percentf4(bnbuf, sizeof(bnbuf), total_bad_nonces, total_diff1); snprintf(logstatusline, sizeof(logstatusline), "%s%ds:%s avg:%s u:%s | A:%d R:%d+%d(%s) HW:%d/%s", want_per_device_stats ? "ALL " : "", opt_log_interval, cHr, aHr, uHr, ui_accepted, ui_rejected, ui_stale, rejpcbuf, hw_errors, bnbuf ); local_mhashes_done = 0; out_unlock: mutex_unlock(&hash_lock); if (showlog) { if (!curses_active) { printf("%s \r", logstatusline); fflush(stdout); } else applog(LOG_INFO, "%s", logstatusline); } } void hashmeter2(struct thr_info *thr) { struct timeval tv_now, tv_elapsed; timerclear(&thr->tv_hashes_done); cgtime(&tv_now); timersub(&tv_now, &thr->tv_lastupdate, &tv_elapsed); /* Update the hashmeter at most 5 times per second */ if ((thr->hashes_done && (tv_elapsed.tv_sec > 0 || tv_elapsed.tv_usec > 200000)) || tv_elapsed.tv_sec >= opt_log_interval) { hashmeter(thr->id, &tv_elapsed, thr->hashes_done); thr->hashes_done = 0; thr->tv_lastupdate = tv_now; } } static void stratum_share_result(json_t *val, json_t *res_val, json_t *err_val, struct stratum_share *sshare) { struct work *work = sshare->work; share_result(val, res_val, err_val, work, false, ""); } /* Parses stratum json responses and tries to find the id that the request * matched to and treat it accordingly. */ bool parse_stratum_response(struct pool *pool, char *s) { json_t *val = NULL, *err_val, *res_val, *id_val; struct stratum_share *sshare; json_error_t err; bool ret = false; int id; val = JSON_LOADS(s, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); id_val = json_object_get(val, "id"); if (json_is_null(id_val) || !id_val) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC non method decode failed: %s", ss); free(ss); goto out; } if (!json_is_integer(id_val)) { if (json_is_string(id_val) && !strncmp(json_string_value(id_val), "txlist", 6)) { const bool is_array = json_is_array(res_val); applog(LOG_DEBUG, "Received %s for pool %u job %s", is_array ? 
"transaction list" : "no-transaction-list response", pool->pool_no, &json_string_value(id_val)[6]); if (strcmp(json_string_value(id_val) + 6, pool->swork.job_id) || !is_array) // We only care about a transaction list for the current job id goto fishy; // Check that the transactions actually hash to the merkle links { unsigned maxtx = 1 << pool->swork.merkles; unsigned mintx = maxtx >> 1; --maxtx; unsigned acttx = (unsigned)json_array_size(res_val); if (acttx < mintx || acttx > maxtx) { applog(LOG_WARNING, "Pool %u is sending mismatched block contents to us (%u is not %u-%u)", pool->pool_no, acttx, mintx, maxtx); goto fishy; } // TODO: Check hashes match actual merkle links } pool_set_opaque(pool, false); timer_unset(&pool->swork.tv_transparency); fishy: ret = true; } goto out; } id = json_integer_value(id_val); mutex_lock(&sshare_lock); HASH_FIND_INT(stratum_shares, &id, sshare); if (sshare) HASH_DEL(stratum_shares, sshare); mutex_unlock(&sshare_lock); if (!sshare) { double pool_diff; /* Since the share is untracked, we can only guess at what the * work difficulty is based on the current pool diff. */ cg_rlock(&pool->data_lock); pool_diff = pool->swork.diff; cg_runlock(&pool->data_lock); if (json_is_true(res_val)) { applog(LOG_NOTICE, "Accepted untracked stratum share from pool %d", pool->pool_no); /* We don't know what device this came from so we can't * attribute the work to the relevant cgpu */ mutex_lock(&stats_lock); total_accepted++; pool->accepted++; total_diff_accepted += pool_diff; pool->diff_accepted += pool_diff; mutex_unlock(&stats_lock); } else { applog(LOG_NOTICE, "Rejected untracked stratum share from pool %d", pool->pool_no); mutex_lock(&stats_lock); total_rejected++; pool->rejected++; total_diff_rejected += pool_diff; pool->diff_rejected += pool_diff; mutex_unlock(&stats_lock); } goto out; } else { mutex_lock(&submitting_lock); --total_submitting; mutex_unlock(&submitting_lock); } stratum_share_result(val, res_val, err_val, sshare); free_work(sshare->work); free(sshare); ret = true; out: if (val) json_decref(val); return ret; } static void shutdown_stratum(struct pool *pool) { // Shut down Stratum as if we never had it pool->stratum_active = false; pool->stratum_init = false; pool->has_stratum = false; shutdown(pool->sock, SHUT_RDWR); free(pool->stratum_url); if (pool->sockaddr_url == pool->stratum_url) pool->sockaddr_url = NULL; pool->stratum_url = NULL; } void clear_stratum_shares(struct pool *pool) { int my_mining_threads = mining_threads; // Cached outside of locking struct stratum_share *sshare, *tmpshare; struct work *work; struct cgpu_info *cgpu; double diff_cleared = 0; double thr_diff_cleared[my_mining_threads]; int cleared = 0; int thr_cleared[my_mining_threads]; // NOTE: This is per-thread rather than per-device to avoid getting devices lock in stratum_shares loop for (int i = 0; i < my_mining_threads; ++i) { thr_diff_cleared[i] = 0; thr_cleared[i] = 0; } mutex_lock(&sshare_lock); HASH_ITER(hh, stratum_shares, sshare, tmpshare) { work = sshare->work; if (sshare->work->pool == pool && work->thr_id < my_mining_threads) { HASH_DEL(stratum_shares, sshare); sharelog("disconnect", work); diff_cleared += sshare->work->work_difficulty; thr_diff_cleared[work->thr_id] += work->work_difficulty; ++thr_cleared[work->thr_id]; free_work(sshare->work); free(sshare); cleared++; } } mutex_unlock(&sshare_lock); if (cleared) { applog(LOG_WARNING, "Lost %d shares due to stratum disconnect on pool %d", cleared, pool->pool_no); mutex_lock(&stats_lock); pool->stale_shares += cleared; 
total_stale += cleared; pool->diff_stale += diff_cleared; total_diff_stale += diff_cleared; for (int i = 0; i < my_mining_threads; ++i) if (thr_cleared[i]) { cgpu = get_thr_cgpu(i); cgpu->diff_stale += thr_diff_cleared[i]; cgpu->stale += thr_cleared[i]; } mutex_unlock(&stats_lock); mutex_lock(&submitting_lock); total_submitting -= cleared; mutex_unlock(&submitting_lock); } } static void resubmit_stratum_shares(struct pool *pool) { struct stratum_share *sshare, *tmpshare; struct work *work; unsigned resubmitted = 0; mutex_lock(&sshare_lock); mutex_lock(&submitting_lock); HASH_ITER(hh, stratum_shares, sshare, tmpshare) { if (sshare->work->pool != pool) continue; HASH_DEL(stratum_shares, sshare); work = sshare->work; DL_APPEND(submit_waiting, work); free(sshare); ++resubmitted; } mutex_unlock(&submitting_lock); mutex_unlock(&sshare_lock); if (resubmitted) { notifier_wake(submit_waiting_notifier); applog(LOG_DEBUG, "Resubmitting %u shares due to stratum disconnect on pool %u", resubmitted, pool->pool_no); } } static void clear_pool_work(struct pool *pool) { struct work *work, *tmp; int cleared = 0; mutex_lock(stgd_lock); HASH_ITER(hh, staged_work, work, tmp) { if (work->pool == pool) { HASH_DEL(staged_work, work); free_work(work); cleared++; staged_full = false; } } mutex_unlock(stgd_lock); } static int cp_prio(void) { int prio; cg_rlock(&control_lock); prio = currentpool->prio; cg_runlock(&control_lock); return prio; } /* We only need to maintain a secondary pool connection when we need the * capacity to get work from the backup pools while still on the primary */ static bool cnx_needed(struct pool *pool) { struct pool *cp; if (pool->enabled != POOL_ENABLED) return false; /* Balance strategies need all pools online */ if (pool_strategy == POOL_BALANCE) return true; if (pool_strategy == POOL_LOADBALANCE) return true; /* Idle stratum pool needs something to kick it alive again */ if (pool->has_stratum && pool->idle) return true; /* Getwork pools without opt_fail_only need backup pools up to be able * to leak shares */ cp = current_pool(); if (cp == pool) return true; if (!pool_localgen(cp) && (!opt_fail_only || !cp->hdr_path)) return true; /* Keep the connection open to allow any stray shares to be submitted * on switching pools for 2 minutes. */ if (!timer_passed(&pool->tv_last_work_time, NULL)) return true; /* If the pool has only just come to life and is higher priority than * the current pool keep the connection open so we can fail back to * it. */ if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio()) return true; if (pool_unworkable(cp)) return true; /* We've run out of work, bring anything back to life. */ if (no_work) return true; return false; } static void wait_lpcurrent(struct pool *pool); static void pool_resus(struct pool *pool); static void gen_stratum_work(struct pool *pool, struct work *work); static void stratum_resumed(struct pool *pool) { if (!pool->stratum_notify) return; if (pool_tclear(pool, &pool->idle)) { applog(LOG_INFO, "Stratum connection to pool %d resumed", pool->pool_no); pool_resus(pool); } } static bool supports_resume(struct pool *pool) { bool ret; cg_rlock(&pool->data_lock); ret = (pool->sessionid != NULL); cg_runlock(&pool->data_lock); return ret; } /* One stratum thread per pool that has stratum waits on the socket checking * for new messages and for the integrity of the socket connection. We reset * the connection based on the integrity of the receive side only as the send * side will eventually expire data it fails to send. 
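 * In practice the loop below suspends the connection (suspend_stratum) when
 * cnx_needed() reports it is no longer required, treats a pool that stays
 * silent beyond the 120-second select() timeout as interrupted, and on a
 * disconnect either resubmits the pending shares (if the pool negotiated
 * session resumption, i.e. a non-NULL sessionid) or clears them as lost.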
*/ static void *stratum_thread(void *userdata) { struct pool *pool = (struct pool *)userdata; pthread_detach(pthread_self()); char threadname[20]; snprintf(threadname, 20, "stratum%u", pool->pool_no); RenameThread(threadname); srand(time(NULL) + (intptr_t)userdata); while (42) { struct timeval timeout; int sel_ret; fd_set rd; char *s; int sock; if (unlikely(!pool->has_stratum)) break; /* Check to see whether we need to maintain this connection * indefinitely or just bring it up when we switch to this * pool */ while (true) { sock = pool->sock; if (sock == INVSOCK) applog(LOG_DEBUG, "Pool %u: Invalid socket, suspending", pool->pool_no); else if (!sock_full(pool) && !cnx_needed(pool)) applog(LOG_DEBUG, "Pool %u: Connection not needed, suspending", pool->pool_no); else break; suspend_stratum(pool); clear_stratum_shares(pool); clear_pool_work(pool); wait_lpcurrent(pool); if (!restart_stratum(pool)) { pool_died(pool); while (!restart_stratum(pool)) { if (pool->removed) goto out; cgsleep_ms(30000); } } } FD_ZERO(&rd); FD_SET(sock, &rd); timeout.tv_sec = 120; timeout.tv_usec = 0; /* If we fail to receive any notify messages for 2 minutes we * assume the connection has been dropped and treat this pool * as dead */ if (!sock_full(pool) && (sel_ret = select(sock + 1, &rd, NULL, NULL, &timeout)) < 1) { applog(LOG_DEBUG, "Stratum select failed on pool %d with value %d", pool->pool_no, sel_ret); s = NULL; } else s = recv_line(pool); if (!s) { if (!pool->has_stratum) break; applog(LOG_NOTICE, "Stratum connection to pool %d interrupted", pool->pool_no); pool->getfail_occasions++; total_go++; mutex_lock(&pool->stratum_lock); pool->stratum_active = pool->stratum_notify = false; pool->sock = INVSOCK; mutex_unlock(&pool->stratum_lock); /* If the socket to our stratum pool disconnects, all * submissions need to be discarded or resent. */ if (!supports_resume(pool)) clear_stratum_shares(pool); else resubmit_stratum_shares(pool); clear_pool_work(pool); if (pool == current_pool()) restart_threads(); if (restart_stratum(pool)) continue; shutdown_stratum(pool); pool_died(pool); break; } /* Check this pool hasn't died while being a backup pool and * has not had its idle flag cleared */ stratum_resumed(pool); if (!parse_method(pool, s) && !parse_stratum_response(pool, s)) applog(LOG_INFO, "Unknown stratum msg: %s", s); free(s); if (pool->swork.clean) { struct work *work = make_work(); /* Generate a single work item to update the current * block database */ pool->swork.clean = false; gen_stratum_work(pool, work); /* Try to extract block height from coinbase scriptSig */ uint8_t *bin_height = &bytes_buf(&pool->swork.coinbase)[4 /*version*/ + 1 /*txin count*/ + 36 /*prevout*/ + 1 /*scriptSig len*/ + 1 /*push opcode*/]; unsigned char cb_height_sz; cb_height_sz = bin_height[-1]; if (cb_height_sz == 3) { // FIXME: The block number will overflow this by AD 2173 uint32_t block_id = ((uint32_t*)work->data)[1]; uint32_t height = 0; memcpy(&height, bin_height, 3); height = le32toh(height); have_block_height(block_id, height); } ++pool->work_restart_id; if (test_work_current(work)) { /* Only accept a work update if this stratum * connection is from the current pool */ if (pool == current_pool()) { restart_threads(); applog( (opt_quiet_work_updates ? 
LOG_DEBUG : LOG_NOTICE), "Stratum from pool %d requested work update", pool->pool_no); } } else applog(LOG_NOTICE, "Stratum from pool %d detected new block", pool->pool_no); free_work(work); } if (timer_passed(&pool->swork.tv_transparency, NULL)) { // More than 4 timmills past since requested transactions timer_unset(&pool->swork.tv_transparency); pool_set_opaque(pool, true); } } out: return NULL; } static void init_stratum_thread(struct pool *pool) { have_longpoll = true; if (unlikely(pthread_create(&pool->stratum_thread, NULL, stratum_thread, (void *)pool))) quit(1, "Failed to create stratum thread"); } static void *longpoll_thread(void *userdata); static bool stratum_works(struct pool *pool) { applog(LOG_INFO, "Testing pool %d stratum %s", pool->pool_no, pool->stratum_url); if (!extract_sockaddr(pool->stratum_url, &pool->sockaddr_url, &pool->stratum_port)) return false; if (pool->stratum_active) return true; if (!initiate_stratum(pool)) return false; return true; } static bool pool_active(struct pool *pool, bool pinging) { struct timeval tv_getwork, tv_getwork_reply; bool ret = false; json_t *val; CURL *curl; int rolltime; char *rpc_req; struct work *work; enum pool_protocol proto; applog(LOG_INFO, "Testing pool %s", pool->rpc_url); /* This is the central point we activate stratum when we can */ curl = curl_easy_init(); if (unlikely(!curl)) { applog(LOG_ERR, "CURL initialisation failed"); return false; } if (!(want_gbt || want_getwork)) goto nohttp; work = make_work(); /* Probe for GBT support on first pass */ proto = want_gbt ? PLP_GETBLOCKTEMPLATE : PLP_GETWORK; tryagain: rpc_req = prepare_rpc_req_probe(work, proto, NULL); work->pool = pool; if (!rpc_req) goto out; pool->probed = false; cgtime(&tv_getwork); val = json_rpc_call(curl, pool->rpc_url, pool->rpc_userpass, rpc_req, true, false, &rolltime, pool, false); cgtime(&tv_getwork_reply); free(rpc_req); /* Detect if a http getwork pool has an X-Stratum header at startup, * and if so, switch to that in preference to getwork if it works */ if (pool->stratum_url && want_stratum && (pool->has_stratum || stratum_works(pool))) { if (!pool->has_stratum) { applog(LOG_NOTICE, "Switching pool %d %s to %s", pool->pool_no, pool->rpc_url, pool->stratum_url); if (!pool->rpc_url) pool->rpc_url = strdup(pool->stratum_url); pool->has_stratum = true; } free_work(work); if (val) json_decref(val); retry_stratum: curl_easy_cleanup(curl); /* We create the stratum thread for each pool just after * successful authorisation. 
Once the init flag has been set * we never unset it and the stratum thread is responsible for * setting/unsetting the active flag */ bool init = pool_tset(pool, &pool->stratum_init); if (!init) { bool ret = initiate_stratum(pool) && auth_stratum(pool); if (ret) { detect_algo = 2; init_stratum_thread(pool); } else pool_tclear(pool, &pool->stratum_init); return ret; } return pool->stratum_active; } else if (pool->has_stratum) shutdown_stratum(pool); if (val) { bool rc; json_t *res; res = json_object_get(val, "result"); if ((!json_is_object(res)) || (proto == PLP_GETBLOCKTEMPLATE && !json_object_get(res, "bits"))) goto badwork; work->rolltime = rolltime; rc = work_decode(pool, work, val); if (rc) { applog(LOG_DEBUG, "Successfully retrieved and deciphered work from pool %u %s", pool->pool_no, pool->rpc_url); work->pool = pool; copy_time(&work->tv_getwork, &tv_getwork); copy_time(&work->tv_getwork_reply, &tv_getwork_reply); work->getwork_mode = GETWORK_MODE_TESTPOOL; calc_diff(work, 0); update_last_work(work); applog(LOG_DEBUG, "Pushing pooltest work to base pool"); stage_work(work); total_getworks++; pool->getwork_requested++; ret = true; cgtime(&pool->tv_idle); } else { badwork: json_decref(val); applog(LOG_DEBUG, "Successfully retrieved but FAILED to decipher work from pool %u %s", pool->pool_no, pool->rpc_url); pool->proto = proto = pool_protocol_fallback(proto); if (PLP_NONE != proto) goto tryagain; free_work(work); goto out; } json_decref(val); if (proto != pool->proto) { pool->proto = proto; applog(LOG_INFO, "Selected %s protocol for pool %u", pool_protocol_name(proto), pool->pool_no); } if (pool->lp_url) goto out; /* Decipher the longpoll URL, if any, and store it in ->lp_url */ const struct blktmpl_longpoll_req *lp; if (work->tmpl && (lp = blktmpl_get_longpoll(work->tmpl))) { // NOTE: work_decode takes care of lp id pool->lp_url = lp->uri ? 
absolute_uri(lp->uri, pool->rpc_url) : pool->rpc_url; if (!pool->lp_url) { ret = false; goto out; } pool->lp_proto = PLP_GETBLOCKTEMPLATE; } else if (pool->hdr_path && want_getwork) { pool->lp_url = absolute_uri(pool->hdr_path, pool->rpc_url); if (!pool->lp_url) { ret = false; goto out; } pool->lp_proto = PLP_GETWORK; } else pool->lp_url = NULL; if (want_longpoll && !pool->lp_started) { pool->lp_started = true; if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool))) quit(1, "Failed to create pool longpoll thread"); } } else if (PLP_NONE != (proto = pool_protocol_fallback(proto))) { pool->proto = proto; goto tryagain; } else { free_work(work); nohttp: /* If we failed to parse a getwork, this could be a stratum * url without the prefix stratum+tcp:// so let's check it */ if (extract_sockaddr(pool->rpc_url, &pool->sockaddr_url, &pool->stratum_port) && initiate_stratum(pool)) { pool->has_stratum = true; goto retry_stratum; } applog(LOG_DEBUG, "FAILED to retrieve work from pool %u %s", pool->pool_no, pool->rpc_url); if (!pinging) applog(LOG_WARNING, "Pool %u slow/down or URL or credentials invalid", pool->pool_no); } out: curl_easy_cleanup(curl); return ret; } static void pool_resus(struct pool *pool) { if (pool_strategy == POOL_FAILOVER && pool->prio < cp_prio()) applog(LOG_WARNING, "Pool %d %s alive, testing stability", pool->pool_no, pool->rpc_url); else applog(LOG_INFO, "Pool %d %s alive", pool->pool_no, pool->rpc_url); } static struct work *hash_pop(void) { struct work *work = NULL, *tmp; int hc; struct timespec ts; retry: mutex_lock(stgd_lock); while (!HASH_COUNT(staged_work)) { if (unlikely(staged_full)) { if (likely(opt_queue < 10 + mining_threads)) { ++opt_queue; applog(LOG_WARNING, "Staged work underrun; increasing queue minimum to %d", opt_queue); } else applog(LOG_WARNING, "Staged work underrun; not automatically increasing above %d", opt_queue); staged_full = false; // Let it fill up before triggering an underrun again no_work = true; } ts = (struct timespec){ .tv_sec = opt_log_interval, }; pthread_cond_signal(&gws_cond); if (ETIMEDOUT == pthread_cond_timedwait(&getq->cond, stgd_lock, &ts)) { run_cmd(cmd_idle); pthread_cond_signal(&gws_cond); pthread_cond_wait(&getq->cond, stgd_lock); } } no_work = false; hc = HASH_COUNT(staged_work); /* Find clone work if possible, to allow masters to be reused */ if (hc > staged_rollable) { HASH_ITER(hh, staged_work, work, tmp) { if (!work_rollable(work)) break; } } else work = staged_work; if (can_roll(work) && should_roll(work)) { // Instead of consuming it, force it to be cloned and grab the clone mutex_unlock(stgd_lock); clone_available(); goto retry; } HASH_DEL(staged_work, work); if (work_rollable(work)) staged_rollable--; /* Signal the getwork scheduler to look for more work */ pthread_cond_signal(&gws_cond); /* Signal hash_pop again in case there are mutliple hash_pop waiters */ pthread_cond_signal(&getq->cond); mutex_unlock(stgd_lock); work->pool->last_work_time = time(NULL); cgtime(&work->pool->tv_last_work_time); return work; } /* Clones work by rolling it if possible, and returning a clone instead of the * original work item which gets staged again to possibly be rolled again in * the future */ static struct work *clone_work(struct work *work) { int mrs = mining_threads + opt_queue - total_staged(); struct work *work_clone; bool cloned; if (mrs < 1) return work; cloned = false; work_clone = make_clone(work); while (mrs-- > 0 && can_roll(work) && should_roll(work)) { applog(LOG_DEBUG, "Pushing rolled 
converted work to stage thread"); stage_work(work_clone); roll_work(work); work_clone = make_clone(work); /* Roll it again to prevent duplicates should this be used * directly later on */ roll_work(work); cloned = true; } if (cloned) { stage_work(work); return work_clone; } free_work(work_clone); return work; } void gen_hash(unsigned char *data, unsigned char *hash, int len) { unsigned char hash1[32]; sha256(data, len, hash1); sha256(hash1, 32, hash); } /* Diff 1 is a 256 bit unsigned integer of * 0x00000000ffff0000000000000000000000000000000000000000000000000000 * so we use a big endian 64 bit unsigned integer centred on the 5th byte to * cover a huge range of difficulty targets, though not all 256 bits' worth */ static void bdiff_target_leadzero(unsigned char *target, double diff) { uint64_t *data64, h64; double d64; d64 = diffone; d64 /= diff; d64 = ceil(d64); h64 = d64; memset(target, 0, 32); if (d64 < 18446744073709551616.0) { unsigned char *rtarget = target; memset(rtarget, 0, 32); if (opt_scrypt) data64 = (uint64_t *)(rtarget + 2); else data64 = (uint64_t *)(rtarget + 4); *data64 = htobe64(h64); } else { /* Support for the classic all FFs just-below-1 diff */ if (opt_scrypt) memset(&target[2], 0xff, 30); else memset(&target[4], 0xff, 28); } } void set_target(unsigned char *dest_target, double diff) { unsigned char rtarget[32]; bdiff_target_leadzero(rtarget, diff); swab256(dest_target, rtarget); if (opt_debug) { char htarget[65]; bin2hex(htarget, rtarget, 32); applog(LOG_DEBUG, "Generated target %s", htarget); } } void stratum_work_cpy(struct stratum_work * const dst, const struct stratum_work * const src) { *dst = *src; dst->job_id = strdup(src->job_id); bytes_cpy(&dst->coinbase, &src->coinbase); bytes_cpy(&dst->merkle_bin, &src->merkle_bin); } void stratum_work_clean(struct stratum_work * const swork) { free(swork->job_id); bytes_free(&swork->coinbase); bytes_free(&swork->merkle_bin); } /* Generates stratum based work based on the most recent notify information * from the pool. 
This will keep generating work while a pool is down so we use * other means to detect when the pool has died in stratum_thread */ static void gen_stratum_work(struct pool *pool, struct work *work) { clean_work(work); cg_wlock(&pool->data_lock); pool->swork.data_lock_p = &pool->data_lock; bytes_resize(&work->nonce2, pool->n2size); if (pool->nonce2sz < pool->n2size) memset(&bytes_buf(&work->nonce2)[pool->nonce2sz], 0, pool->n2size - pool->nonce2sz); memcpy(bytes_buf(&work->nonce2), #ifdef WORDS_BIGENDIAN // NOTE: On big endian, the most significant bits are stored at the end, so skip the LSBs &((char*)&pool->nonce2)[pool->nonce2off], #else &pool->nonce2, #endif pool->nonce2sz); pool->nonce2++; work->pool = pool; work->work_restart_id = work->pool->work_restart_id; gen_stratum_work2(work, &pool->swork, pool->nonce1); cgtime(&work->tv_staged); } void gen_stratum_work2(struct work *work, struct stratum_work *swork, const char *nonce1) { unsigned char *coinbase, merkle_root[32], merkle_sha[64]; uint8_t *merkle_bin; uint32_t *data32, *swap32; int i; /* Generate coinbase */ coinbase = bytes_buf(&swork->coinbase); memcpy(&coinbase[swork->nonce2_offset], bytes_buf(&work->nonce2), bytes_len(&work->nonce2)); /* Downgrade to a read lock to read off the variables */ if (swork->data_lock_p) cg_dwlock(swork->data_lock_p); /* Generate merkle root */ gen_hash(coinbase, merkle_root, bytes_len(&swork->coinbase)); memcpy(merkle_sha, merkle_root, 32); merkle_bin = bytes_buf(&swork->merkle_bin); for (i = 0; i < swork->merkles; ++i, merkle_bin += 32) { memcpy(merkle_sha + 32, merkle_bin, 32); gen_hash(merkle_sha, merkle_root, 64); memcpy(merkle_sha, merkle_root, 32); } data32 = (uint32_t *)merkle_sha; swap32 = (uint32_t *)merkle_root; flip32(swap32, data32); memcpy(&work->data[0], swork->header1, 36); memcpy(&work->data[36], merkle_root, 32); *((uint32_t*)&work->data[68]) = htobe32(swork->ntime + timer_elapsed(&swork->tv_received, NULL)); memcpy(&work->data[72], swork->diffbits, 4); memset(&work->data[76], 0, 4); // nonce memcpy(&work->data[80], workpadding_bin, 48); /* Store the stratum work diff to check it still matches the pool's * stratum diff when submitting shares */ work->sdiff = swork->diff; /* Copy parameters required for share submission */ work->job_id = strdup(swork->job_id); work->nonce1 = strdup(nonce1); if (swork->data_lock_p) cg_runlock(swork->data_lock_p); if (opt_debug) { char header[161]; char nonce2hex[(bytes_len(&work->nonce2) * 2) + 1]; bin2hex(header, work->data, 80); bin2hex(nonce2hex, bytes_buf(&work->nonce2), bytes_len(&work->nonce2)); applog(LOG_DEBUG, "Generated stratum header %s", header); applog(LOG_DEBUG, "Work job_id %s nonce2 %s", work->job_id, nonce2hex); } calc_midstate(work); set_target(work->target, work->sdiff); local_work++; work->stratum = true; work->blk.nonce = 0; work->id = total_work++; work->longpoll = false; work->getwork_mode = GETWORK_MODE_STRATUM; /* Nominally allow a driver to ntime roll 60 seconds */ work->drv_rolllimit = 60; calc_diff(work, 0); } void request_work(struct thr_info *thr) { struct cgpu_info *cgpu = thr->cgpu; struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats); /* Tell the watchdog thread this thread is waiting on getwork and * should not be restarted */ thread_reportout(thr); // HACK: Since get_work still blocks, reportout all processors dependent on this thread for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc) { if (proc->threads) break; thread_reportout(proc->thr[0]); } cgtime(&dev_stats->_get_start); } // 
FIXME: Make this non-blocking (and remove HACK above) struct work *get_work(struct thr_info *thr) { const int thr_id = thr->id; struct cgpu_info *cgpu = thr->cgpu; struct cgminer_stats *dev_stats = &(cgpu->cgminer_stats); struct cgminer_stats *pool_stats; struct timeval tv_get; struct work *work = NULL; applog(LOG_DEBUG, "%"PRIpreprv": Popping work from get queue to get work", cgpu->proc_repr); while (!work) { work = hash_pop(); if (stale_work(work, false)) { staged_full = false; // It wasn't really full, since it was stale :( discard_work(work); work = NULL; wake_gws(); } } applog(LOG_DEBUG, "%"PRIpreprv": Got work %d from get queue to get work for thread %d", cgpu->proc_repr, work->id, thr_id); work->thr_id = thr_id; thread_reportin(thr); // HACK: Since get_work still blocks, reportin all processors dependent on this thread for (struct cgpu_info *proc = thr->cgpu->next_proc; proc; proc = proc->next_proc) { if (proc->threads) break; thread_reportin(proc->thr[0]); } work->mined = true; work->blk.nonce = 0; cgtime(&tv_get); timersub(&tv_get, &dev_stats->_get_start, &tv_get); timeradd(&tv_get, &dev_stats->getwork_wait, &dev_stats->getwork_wait); if (timercmp(&tv_get, &dev_stats->getwork_wait_max, >)) dev_stats->getwork_wait_max = tv_get; if (timercmp(&tv_get, &dev_stats->getwork_wait_min, <)) dev_stats->getwork_wait_min = tv_get; ++dev_stats->getwork_calls; pool_stats = &(work->pool->cgminer_stats); timeradd(&tv_get, &pool_stats->getwork_wait, &pool_stats->getwork_wait); if (timercmp(&tv_get, &pool_stats->getwork_wait_max, >)) pool_stats->getwork_wait_max = tv_get; if (timercmp(&tv_get, &pool_stats->getwork_wait_min, <)) pool_stats->getwork_wait_min = tv_get; ++pool_stats->getwork_calls; return work; } static void _submit_work_async(struct work *work) { applog(LOG_DEBUG, "Pushing submit work to work thread"); mutex_lock(&submitting_lock); ++total_submitting; DL_APPEND(submit_waiting, work); mutex_unlock(&submitting_lock); notifier_wake(submit_waiting_notifier); } /* Submit a copy of the tested, statistic recorded work item asynchronously */ static void submit_work_async2(struct work *work, struct timeval *tv_work_found) { if (tv_work_found) copy_time(&work->tv_work_found, tv_work_found); _submit_work_async(work); } void inc_hw_errors2(struct thr_info *thr, const struct work *work, const uint32_t *bad_nonce_p) { struct cgpu_info * const cgpu = thr->cgpu; if (bad_nonce_p) { if (bad_nonce_p == UNKNOWN_NONCE) applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce - HW error", cgpu->proc_repr); else applog(LOG_DEBUG, "%"PRIpreprv": invalid nonce (%08lx) - HW error", cgpu->proc_repr, (unsigned long)be32toh(*bad_nonce_p)); } mutex_lock(&stats_lock); hw_errors++; ++cgpu->hw_errors; if (bad_nonce_p) { ++total_bad_nonces; ++cgpu->bad_nonces; } mutex_unlock(&stats_lock); if (thr->cgpu->drv->hw_error) thr->cgpu->drv->hw_error(thr); } void inc_hw_errors(struct thr_info *thr, const struct work *work, const uint32_t bad_nonce) { inc_hw_errors2(thr, work, work ? 
&bad_nonce : NULL); } enum test_nonce2_result hashtest2(struct work *work, bool checktarget) { uint32_t *hash2_32 = (uint32_t *)&work->hash[0]; hash_data(work->hash, work->data); if (hash2_32[7] != 0) return TNR_BAD; if (!checktarget) return TNR_GOOD; if (!hash_target_check_v(work->hash, work->target)) return TNR_HIGH; return TNR_GOOD; } enum test_nonce2_result _test_nonce2(struct work *work, uint32_t nonce, bool checktarget) { uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12); *work_nonce = htole32(nonce); #ifdef USE_SCRYPT if (opt_scrypt) // NOTE: Depends on scrypt_test return matching enum values return scrypt_test(work->data, work->target, nonce); #endif return hashtest2(work, checktarget); } /* Returns true if nonce for work was a valid share */ bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce) { return submit_noffset_nonce(thr, work, nonce, 0); } /* Allows drivers to submit work items where the driver has changed the ntime * value by noffset. Must be only used with a work protocol that does not ntime * roll itself intrinsically to generate work (eg stratum). We do not touch * the original work struct, but the copy of it only. */ bool submit_noffset_nonce(struct thr_info *thr, struct work *work_in, uint32_t nonce, int noffset) { struct work *work = make_work(); _copy_work(work, work_in, noffset); uint32_t *work_nonce = (uint32_t *)(work->data + 64 + 12); struct timeval tv_work_found; enum test_nonce2_result res; bool ret = true; thread_reportout(thr); cgtime(&tv_work_found); *work_nonce = htole32(nonce); work->thr_id = thr->id; /* Do one last check before attempting to submit the work */ /* Side effect: sets work->data for us */ res = test_nonce2(work, nonce); if (unlikely(res == TNR_BAD)) { inc_hw_errors(thr, work, nonce); ret = false; goto out; } mutex_lock(&stats_lock); total_diff1++; thr->cgpu->diff1++; work->pool->diff1++; thr->cgpu->last_device_valid_work = time(NULL); mutex_unlock(&stats_lock); if (noncelog_file) noncelog(work); if (res == TNR_HIGH) { // Share above target, normal /* Check the diff of the share, even if it didn't reach the * target, just to set the best share value if it's higher. 
*/ share_diff(work); goto out; } submit_work_async2(work, &tv_work_found); work = NULL; // Taken by submit_work_async2 out: if (work) free_work(work); thread_reportin(thr); return ret; } bool abandon_work(struct work *work, struct timeval *wdiff, uint64_t hashes) { if (wdiff->tv_sec > opt_scantime || work->blk.nonce >= MAXTHREADS - hashes || hashes >= 0xfffffffe || stale_work(work, false)) return true; return false; } void __thr_being_msg(int prio, struct thr_info *thr, const char *being) { struct cgpu_info *proc = thr->cgpu; if (proc->threads > 1) applog(prio, "%"PRIpreprv" (thread %d) %s", proc->proc_repr, thr->id, being); else applog(prio, "%"PRIpreprv" %s", proc->proc_repr, being); } // Called by asynchronous minerloops, when they find their processor should be disabled void mt_disable_start(struct thr_info *mythr) { struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *drv = cgpu->drv; if (drv->thread_disable) drv->thread_disable(mythr); hashmeter2(mythr); if (mythr->prev_work) free_work(mythr->prev_work); mythr->prev_work = mythr->work; mythr->work = NULL; mythr->_job_transition_in_progress = false; __thr_being_msg(LOG_WARNING, mythr, "being disabled"); mythr->rolling = mythr->cgpu->rolling = 0; thread_reportout(mythr); mythr->_mt_disable_called = true; } /* Put a new unqueued work item in cgpu->unqueued_work under cgpu->qlock till * the driver tells us it's full so that it may extract the work item using * the get_queued() function which adds it to the hashtable on * cgpu->queued_work. */ static void fill_queue(struct thr_info *mythr, struct cgpu_info *cgpu, struct device_drv *drv, const int thr_id) { thread_reportout(mythr); do { bool need_work; /* Do this lockless just to know if we need more unqueued work. */ need_work = (!cgpu->unqueued_work); /* get_work is a blocking function so do it outside of lock * to prevent deadlocks with other locks. */ if (need_work) { struct work *work = get_work(mythr); wr_lock(&cgpu->qlock); /* Check we haven't grabbed work somehow between * checking and picking up the lock. */ if (likely(!cgpu->unqueued_work)) cgpu->unqueued_work = work; else need_work = false; wr_unlock(&cgpu->qlock); if (unlikely(!need_work)) discard_work(work); } /* The queue_full function should be used by the driver to * actually place work items on the physical device if it * does have a queue. */ } while (drv->queue_full && !drv->queue_full(cgpu)); } /* Add a work item to a cgpu's queued hashlist */ void __add_queued(struct cgpu_info *cgpu, struct work *work) { cgpu->queued_count++; HASH_ADD_INT(cgpu->queued_work, id, work); } /* This function is for retrieving one work item from the unqueued pointer and * adding it to the hashtable of queued work. Code using this function must be * able to handle NULL as a return which implies there is no work available. */ struct work *get_queued(struct cgpu_info *cgpu) { struct work *work = NULL; wr_lock(&cgpu->qlock); if (cgpu->unqueued_work) { work = cgpu->unqueued_work; __add_queued(cgpu, work); cgpu->unqueued_work = NULL; } wr_unlock(&cgpu->qlock); return work; } void add_queued(struct cgpu_info *cgpu, struct work *work) { wr_lock(&cgpu->qlock); __add_queued(cgpu, work); wr_unlock(&cgpu->qlock); } /* Get fresh work and add it to cgpu's queued hashlist */ struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id) { struct work *work = get_work(thr); add_queued(cgpu, work); return work; } /* This function is for finding an already queued work item in the * given que hashtable. 
Code using this function must be able * to handle NULL as a return which implies there is no matching work. * The calling function must lock access to the que if it is required. * The common values for midstatelen, offset, datalen are 32, 64, 12 */ struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) { struct work *work, *tmp, *ret = NULL; HASH_ITER(hh, que, work, tmp) { if (memcmp(work->midstate, midstate, midstatelen) == 0 && memcmp(work->data + offset, data, datalen) == 0) { ret = work; break; } } return ret; } /* This function is for finding an already queued work item in the * device's queued_work hashtable. Code using this function must be able * to handle NULL as a return which implies there is no matching work. * The common values for midstatelen, offset, datalen are 32, 64, 12 */ struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) { struct work *ret; rd_lock(&cgpu->qlock); ret = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen); rd_unlock(&cgpu->qlock); return ret; } struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) { struct work *work, *ret = NULL; rd_lock(&cgpu->qlock); work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen); if (work) ret = copy_work(work); rd_unlock(&cgpu->qlock); return ret; } void __work_completed(struct cgpu_info *cgpu, struct work *work) { cgpu->queued_count--; HASH_DEL(cgpu->queued_work, work); } /* This function should be used by queued device drivers when they're sure * the work struct is no longer in use. */ void work_completed(struct cgpu_info *cgpu, struct work *work) { wr_lock(&cgpu->qlock); __work_completed(cgpu, work); wr_unlock(&cgpu->qlock); free_work(work); } /* Combines find_queued_work_bymidstate and work_completed in one function * withOUT destroying the work so the driver must free it. */ struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen) { struct work *work; wr_lock(&cgpu->qlock); work = __find_work_bymidstate(cgpu->queued_work, midstate, midstatelen, data, offset, datalen); if (work) __work_completed(cgpu, work); wr_unlock(&cgpu->qlock); return work; } static void flush_queue(struct cgpu_info *cgpu) { struct work *work = NULL; wr_lock(&cgpu->qlock); work = cgpu->unqueued_work; cgpu->unqueued_work = NULL; wr_unlock(&cgpu->qlock); if (work) { free_work(work); applog(LOG_DEBUG, "Discarded queued work item"); } } /* This version of hash work is for devices that are fast enough to always * perform a full nonce range and need a queue to maintain the device busy. * Work creation and destruction is not done from within this function * directly. */ void hash_queued_work(struct thr_info *mythr) { const long cycle = opt_log_interval / 5 ? : 1; struct timeval tv_start = {0, 0}, tv_end; struct cgpu_info *cgpu = mythr->cgpu; struct device_drv *drv = cgpu->drv; const int thr_id = mythr->id; int64_t hashes_done = 0; if (unlikely(cgpu->deven != DEV_ENABLED)) mt_disable(mythr); while (likely(!cgpu->shutdown)) { struct timeval diff; int64_t hashes; fill_queue(mythr, cgpu, drv, thr_id); thread_reportin(mythr); hashes = drv->scanwork(mythr); /* Reset the bool here in case the driver looks for it * synchronously in the scanwork loop. 
*/ mythr->work_restart = false; if (unlikely(hashes == -1 )) { applog(LOG_ERR, "%s %d failure, disabling!", drv->name, cgpu->device_id); cgpu->deven = DEV_DISABLED; dev_error(cgpu, REASON_THREAD_ZERO_HASH); mt_disable(mythr); } hashes_done += hashes; cgtime(&tv_end); timersub(&tv_end, &tv_start, &diff); if (diff.tv_sec >= cycle) { hashmeter(thr_id, &diff, hashes_done); hashes_done = 0; copy_time(&tv_start, &tv_end); } if (unlikely(mythr->pause || cgpu->deven != DEV_ENABLED)) mt_disable(mythr); if (unlikely(mythr->work_restart)) { flush_queue(cgpu); if (drv->flush_work) drv->flush_work(cgpu); } } // cgpu->deven = DEV_DISABLED; set in miner_thread } // Called by minerloop, when it is re-enabling a processor void mt_disable_finish(struct thr_info *mythr) { struct device_drv *drv = mythr->cgpu->drv; thread_reportin(mythr); __thr_being_msg(LOG_WARNING, mythr, "being re-enabled"); if (drv->thread_enable) drv->thread_enable(mythr); mythr->_mt_disable_called = false; } // Called by synchronous minerloops, when they find their processor should be disabled // Calls mt_disable_start, waits until it's re-enabled, then calls mt_disable_finish void mt_disable(struct thr_info *mythr) { const struct cgpu_info * const cgpu = mythr->cgpu; mt_disable_start(mythr); applog(LOG_DEBUG, "Waiting for wakeup notification in miner thread"); do { notifier_read(mythr->notifier); } while (mythr->pause || cgpu->deven != DEV_ENABLED); mt_disable_finish(mythr); } enum { STAT_SLEEP_INTERVAL = 1, STAT_CTR_INTERVAL = 10000000, FAILURE_INTERVAL = 30, }; /* Stage another work item from the work returned in a longpoll */ static void convert_to_work(json_t *val, int rolltime, struct pool *pool, struct work *work, struct timeval *tv_lp, struct timeval *tv_lp_reply) { bool rc; work->rolltime = rolltime; rc = work_decode(pool, work, val); if (unlikely(!rc)) { applog(LOG_ERR, "Could not convert longpoll data to work"); free_work(work); return; } total_getworks++; pool->getwork_requested++; work->pool = pool; copy_time(&work->tv_getwork, tv_lp); copy_time(&work->tv_getwork_reply, tv_lp_reply); calc_diff(work, 0); if (pool->enabled == POOL_REJECTING) work->mandatory = true; work->longpoll = true; work->getwork_mode = GETWORK_MODE_LP; update_last_work(work); /* We'll be checking this work item twice, but we already know it's * from a new block so explicitly force the new block detection now * rather than waiting for it to hit the stage thread. This also * allows testwork to know whether LP discovered the block or not. */ test_work_current(work); /* Don't use backup LPs as work if we have failover-only enabled. Use * the longpoll work from a pool that has been rejecting shares as a * way to detect when the pool has recovered. */ if (pool != current_pool() && opt_fail_only && pool->enabled != POOL_REJECTING) { free_work(work); return; } work = clone_work(work); applog(LOG_DEBUG, "Pushing converted work to stage thread"); stage_work(work); applog(LOG_DEBUG, "Converted longpoll data to work"); } /* If we want longpoll, enable it for the chosen default pool, or, if * the pool does not support longpoll, find the first one that does * and use its longpoll support */ static struct pool *select_longpoll_pool(struct pool *cp) { int i; if (cp->lp_url) return cp; for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (pool->has_stratum || pool->lp_url) return pool; } return NULL; } /* This will make the longpoll thread wait till it's the current pool, or it * has been flagged as rejecting, before attempting to open any connections. 
*/ static void wait_lpcurrent(struct pool *pool) { while (!cnx_needed(pool) && (pool->enabled == POOL_DISABLED || (pool != current_pool() && pool_strategy != POOL_LOADBALANCE && pool_strategy != POOL_BALANCE))) { mutex_lock(&lp_lock); pthread_cond_wait(&lp_cond, &lp_lock); mutex_unlock(&lp_lock); } } static curl_socket_t save_curl_socket(void *vpool, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr) { struct pool *pool = vpool; curl_socket_t sock = socket(addr->family, addr->socktype, addr->protocol); pool->lp_socket = sock; return sock; } static void *longpoll_thread(void *userdata) { struct pool *cp = (struct pool *)userdata; /* This *pool is the source of the actual longpoll, not the pool we've * tied it to */ struct timeval start, reply, end; struct pool *pool = NULL; char threadname[20]; CURL *curl = NULL; int failures = 0; char *lp_url; int rolltime; #ifndef HAVE_PTHREAD_CANCEL pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); #endif snprintf(threadname, 20, "longpoll%u", cp->pool_no); RenameThread(threadname); curl = curl_easy_init(); if (unlikely(!curl)) { applog(LOG_ERR, "CURL initialisation failed"); return NULL; } retry_pool: pool = select_longpoll_pool(cp); if (!pool) { applog(LOG_WARNING, "No suitable long-poll found for %s", cp->rpc_url); while (!pool) { cgsleep_ms(60000); pool = select_longpoll_pool(cp); } } if (pool->has_stratum) { applog(LOG_WARNING, "Block change for %s detection via %s stratum", cp->rpc_url, pool->rpc_url); goto out; } /* Any longpoll from any pool is enough for this to be true */ have_longpoll = true; wait_lpcurrent(cp); { lp_url = pool->lp_url; if (cp == pool) applog(LOG_WARNING, "Long-polling activated for %s (%s)", lp_url, pool_protocol_name(pool->lp_proto)); else applog(LOG_WARNING, "Long-polling activated for %s via %s (%s)", cp->rpc_url, lp_url, pool_protocol_name(pool->lp_proto)); } while (42) { json_t *val, *soval; struct work *work = make_work(); char *lpreq; lpreq = prepare_rpc_req(work, pool->lp_proto, pool->lp_id); work->pool = pool; if (!lpreq) { free_work(work); goto lpfail; } wait_lpcurrent(cp); cgtime(&start); /* Longpoll connections can be persistent for a very long time * and any number of issues could have come up in the meantime * so always establish a fresh connection instead of relying on * a persistent one. */ curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, save_curl_socket); curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool); val = json_rpc_call(curl, lp_url, pool->rpc_userpass, lpreq, false, true, &rolltime, pool, false); pool->lp_socket = CURL_SOCKET_BAD; cgtime(&reply); free(lpreq); if (likely(val)) { soval = json_object_get(json_object_get(val, "result"), "submitold"); if (soval) pool->submit_old = json_is_true(soval); else pool->submit_old = false; convert_to_work(val, rolltime, pool, work, &start, &reply); failures = 0; json_decref(val); } else { /* Some pools regularly drop the longpoll request so * only see this as longpoll failure if it happens * immediately and just restart it the rest of the * time. 
*/ cgtime(&end); free_work(work); if (end.tv_sec - start.tv_sec > 30) continue; if (failures == 1) applog(LOG_WARNING, "longpoll failed for %s, retrying every 30s", lp_url); lpfail: cgsleep_ms(30000); } if (pool != cp) { pool = select_longpoll_pool(cp); if (pool->has_stratum) { applog(LOG_WARNING, "Block change for %s detection via %s stratum", cp->rpc_url, pool->rpc_url); break; } if (unlikely(!pool)) goto retry_pool; } if (unlikely(pool->removed)) break; } out: curl_easy_cleanup(curl); return NULL; } static void stop_longpoll(void) { int i; want_longpoll = false; for (i = 0; i < total_pools; ++i) { struct pool *pool = pools[i]; if (unlikely(!pool->lp_started)) continue; pool->lp_started = false; pthread_cancel(pool->longpoll_thread); } have_longpoll = false; } static void start_longpoll(void) { int i; want_longpoll = true; for (i = 0; i < total_pools; ++i) { struct pool *pool = pools[i]; if (unlikely(pool->removed || pool->lp_started || !pool->lp_url)) continue; pool->lp_started = true; if (unlikely(pthread_create(&pool->longpoll_thread, NULL, longpoll_thread, (void *)pool))) quit(1, "Failed to create pool longpoll thread"); } } void reinit_device(struct cgpu_info *cgpu) { if (cgpu->drv->reinit_device) cgpu->drv->reinit_device(cgpu); } static struct timeval rotate_tv; /* We reap curls if they are unused for over a minute */ static void reap_curl(struct pool *pool) { struct curl_ent *ent, *iter; struct timeval now; int reaped = 0; cgtime(&now); mutex_lock(&pool->pool_lock); LL_FOREACH_SAFE(pool->curllist, ent, iter) { if (pool->curls < 2) break; if (now.tv_sec - ent->tv.tv_sec > 300) { reaped++; pool->curls--; LL_DELETE(pool->curllist, ent); curl_easy_cleanup(ent->curl); free(ent); } } mutex_unlock(&pool->pool_lock); if (reaped) applog(LOG_DEBUG, "Reaped %d curl%s from pool %d", reaped, reaped > 1 ? "s" : "", pool->pool_no); } static void *watchpool_thread(void __maybe_unused *userdata) { int intervals = 0; #ifndef HAVE_PTHREAD_CANCEL pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); #endif RenameThread("watchpool"); while (42) { struct timeval now; int i; if (++intervals > 20) intervals = 0; cgtime(&now); for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (!opt_benchmark) reap_curl(pool); /* Get a rolling utility per pool over 10 mins */ if (intervals > 19) { int shares = pool->diff1 - pool->last_shares; pool->last_shares = pool->diff1; pool->utility = (pool->utility + (double)shares * 0.63) / 1.63; pool->shares = pool->utility; } if (pool->enabled == POOL_DISABLED) continue; /* Don't start testing any pools if the test threads * from startup are still doing their first attempt. */ if (unlikely(pool->testing)) { pthread_join(pool->test_thread, NULL); } /* Test pool is idle once every minute */ if (pool->idle && now.tv_sec - pool->tv_idle.tv_sec > 30) { cgtime(&pool->tv_idle); if (pool_active(pool, true) && pool_tclear(pool, &pool->idle)) pool_resus(pool); } /* Only switch pools if the failback pool has been * alive for more than 5 minutes to prevent * intermittently failing pools from being used. 
*/ if (!pool->idle && pool_strategy == POOL_FAILOVER && pool->prio < cp_prio() && now.tv_sec - pool->tv_idle.tv_sec > 300) { applog(LOG_WARNING, "Pool %d %s stable for 5 mins", pool->pool_no, pool->rpc_url); switch_pools(NULL); } } if (current_pool()->idle) switch_pools(NULL); if (pool_strategy == POOL_ROTATE && now.tv_sec - rotate_tv.tv_sec > 60 * opt_rotate_period) { cgtime(&rotate_tv); switch_pools(NULL); } cgsleep_ms(30000); } return NULL; } void mt_enable(struct thr_info *thr) { applog(LOG_DEBUG, "Waking up thread %d", thr->id); notifier_wake(thr->notifier); } void proc_enable(struct cgpu_info *cgpu) { int j; cgpu->deven = DEV_ENABLED; for (j = cgpu->threads ?: 1; j--; ) mt_enable(cgpu->thr[j]); } #define device_recovered(cgpu) proc_enable(cgpu) void cgpu_set_defaults(struct cgpu_info * const cgpu) { const struct device_drv * const drv = cgpu->drv; struct string_elist *setstr_elist; const char *p, *p2; char replybuf[0x2000]; size_t L; DL_FOREACH(opt_set_device_list, setstr_elist) { const char * const setstr = setstr_elist->string; p = strchr(setstr, ':'); if (!p) p = setstr; { L = p - setstr; char pattern[L + 1]; if (L) memcpy(pattern, setstr, L); pattern[L] = '\0'; if (!cgpu_match(pattern, cgpu)) continue; } applog(LOG_DEBUG, "%"PRIpreprv": %s: Matched with set default: %s", cgpu->proc_repr, __func__, setstr); if (!drv->set_device) { applog(LOG_WARNING, "%"PRIpreprv": set_device is not implemented (trying to apply rule: %s)", cgpu->proc_repr, setstr); continue; } if (p[0] == ':') ++p; p2 = strchr(p, '='); if (!p2) { L = strlen(p); p2 = ""; } else { L = p2 - p; ++p2; } char opt[L + 1]; if (L) memcpy(opt, p, L); opt[L] = '\0'; L = strlen(p2); char setval[L + 1]; if (L) memcpy(setval, p2, L); setval[L] = '\0'; p = drv->set_device(cgpu, opt, setval, replybuf); if (p) applog(LOG_WARNING, "%"PRIpreprv": Applying rule %s: %s", cgpu->proc_repr, setstr, p); else applog(LOG_DEBUG, "%"PRIpreprv": Applied rule %s", cgpu->proc_repr, setstr); } cgpu->already_set_defaults = true; } void drv_set_defaults(const struct device_drv * const drv, char *(*set_func)(struct cgpu_info *, char *, char *, char *), void *userp) { struct device_drv dummy_drv = *drv; struct cgpu_info dummy_cgpu = { .drv = &dummy_drv, .device = &dummy_cgpu, .device_id = -1, .proc_id = -1, .device_data = userp, }; strcpy(dummy_cgpu.proc_repr, drv->name); dummy_drv.set_device = set_func; cgpu_set_defaults(&dummy_cgpu); } /* Makes sure the hashmeter keeps going even if mining threads stall, updates * the screen at regular intervals, and restarts threads if they appear to have * died. 
*/ #define WATCHDOG_SICK_TIME 60 #define WATCHDOG_DEAD_TIME 600 #define WATCHDOG_SICK_COUNT (WATCHDOG_SICK_TIME/WATCHDOG_INTERVAL) #define WATCHDOG_DEAD_COUNT (WATCHDOG_DEAD_TIME/WATCHDOG_INTERVAL) static void *watchdog_thread(void __maybe_unused *userdata) { const unsigned int interval = WATCHDOG_INTERVAL; struct timeval zero_tv; #ifndef HAVE_PTHREAD_CANCEL pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); #endif RenameThread("watchdog"); memset(&zero_tv, 0, sizeof(struct timeval)); cgtime(&rotate_tv); while (1) { int i; struct timeval now; sleep(interval); discard_stale(); hashmeter(-1, &zero_tv, 0); #ifdef HAVE_CURSES const int ts = total_staged(); if (curses_active_locked()) { change_logwinsize(); curses_print_status(ts); _refresh_devstatus(true); touchwin(logwin); wrefresh(logwin); unlock_curses(); } #endif cgtime(&now); if (!sched_paused && !should_run()) { applog(LOG_WARNING, "Pausing execution as per stop time %02d:%02d scheduled", schedstop.tm.tm_hour, schedstop.tm.tm_min); if (!schedstart.enable) { quit(0, "Terminating execution as planned"); break; } applog(LOG_WARNING, "Will restart execution as scheduled at %02d:%02d", schedstart.tm.tm_hour, schedstart.tm.tm_min); sched_paused = true; rd_lock(&mining_thr_lock); for (i = 0; i < mining_threads; i++) mining_thr[i]->pause = true; rd_unlock(&mining_thr_lock); } else if (sched_paused && should_run()) { applog(LOG_WARNING, "Restarting execution as per start time %02d:%02d scheduled", schedstart.tm.tm_hour, schedstart.tm.tm_min); if (schedstop.enable) applog(LOG_WARNING, "Will pause execution as scheduled at %02d:%02d", schedstop.tm.tm_hour, schedstop.tm.tm_min); sched_paused = false; for (i = 0; i < mining_threads; i++) { struct thr_info *thr; thr = get_thread(i); thr->pause = false; } for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = get_devices(i); /* Don't touch disabled devices */ if (cgpu->deven == DEV_DISABLED) continue; proc_enable(cgpu); } } for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = get_devices(i); if (!cgpu->disable_watchdog) bfg_watchdog(cgpu, &now); } } return NULL; } void bfg_watchdog(struct cgpu_info * const cgpu, struct timeval * const tvp_now) { struct thr_info *thr = cgpu->thr[0]; enum dev_enable *denable; char *dev_str = cgpu->proc_repr; int gpu; if (likely(drv_ready(cgpu))) { if (unlikely(!cgpu->already_set_defaults)) cgpu_set_defaults(cgpu); if (cgpu->drv->get_stats) cgpu->drv->get_stats(cgpu); } gpu = cgpu->device_id; denable = &cgpu->deven; #ifdef HAVE_ADL if (adl_active && cgpu->has_adl) gpu_autotune(gpu, denable); if (opt_debug && cgpu->has_adl) { int engineclock = 0, memclock = 0, activity = 0, fanspeed = 0, fanpercent = 0, powertune = 0; float temp = 0, vddc = 0; if (gpu_stats(gpu, &temp, &engineclock, &memclock, &vddc, &activity, &fanspeed, &fanpercent, &powertune)) applog(LOG_DEBUG, "%.1f C F: %d%%(%dRPM) E: %dMHz M: %dMHz V: %.3fV A: %d%% P: %d%%", temp, fanpercent, fanspeed, engineclock, memclock, vddc, activity, powertune); } #endif /* Thread is disabled */ if (*denable == DEV_DISABLED) return; else if (*denable == DEV_RECOVER_ERR) { if (opt_restart && timer_elapsed(&cgpu->tv_device_last_not_well, NULL) > cgpu->reinit_backoff) { applog(LOG_NOTICE, "Attempting to reinitialize %s", dev_str); if (cgpu->reinit_backoff < 300) cgpu->reinit_backoff *= 2; device_recovered(cgpu); } return; } else if (*denable == DEV_RECOVER) { if (opt_restart && cgpu->temp < cgpu->targettemp) { applog(LOG_NOTICE, "%s recovered to temperature below target, re-enabling", dev_str); 
device_recovered(cgpu); } dev_error_update(cgpu, REASON_DEV_THERMAL_CUTOFF); return; } else if (cgpu->temp > cgpu->cutofftemp) { applog(LOG_WARNING, "%s hit thermal cutoff limit, disabling!", dev_str); *denable = DEV_RECOVER; dev_error(cgpu, REASON_DEV_THERMAL_CUTOFF); run_cmd(cmd_idle); } if (thr->getwork) { if (cgpu->status == LIFE_WELL && thr->getwork < tvp_now->tv_sec - opt_log_interval) { int thrid; bool cgpu_idle = true; thr->rolling = 0; for (thrid = 0; thrid < cgpu->threads; ++thrid) if (!cgpu->thr[thrid]->getwork) cgpu_idle = false; if (cgpu_idle) { cgpu->rolling = 0; cgpu->status = LIFE_WAIT; } } return; } else if (cgpu->status == LIFE_WAIT) cgpu->status = LIFE_WELL; #ifdef WANT_CPUMINE if (!strcmp(cgpu->drv->dname, "cpu")) return; #endif if (cgpu->status != LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec < WATCHDOG_SICK_TIME)) { if (likely(cgpu->status != LIFE_INIT && cgpu->status != LIFE_INIT2)) applog(LOG_ERR, "%s: Recovered, declaring WELL!", dev_str); cgpu->status = LIFE_WELL; cgpu->device_last_well = time(NULL); } else if (cgpu->status == LIFE_WELL && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_SICK_TIME)) { thr->rolling = cgpu->rolling = 0; cgpu->status = LIFE_SICK; applog(LOG_ERR, "%s: Idle for more than 60 seconds, declaring SICK!", dev_str); cgtime(&thr->sick); dev_error(cgpu, REASON_DEV_SICK_IDLE_60); run_cmd(cmd_sick); #ifdef HAVE_ADL if (adl_active && cgpu->has_adl && gpu_activity(gpu) > 50) { applog(LOG_ERR, "GPU still showing activity suggesting a hard hang."); applog(LOG_ERR, "Will not attempt to auto-restart it."); } else #endif if (opt_restart && cgpu->drv->reinit_device) { applog(LOG_ERR, "%s: Attempting to restart", dev_str); reinit_device(cgpu); } } else if (cgpu->status == LIFE_SICK && (tvp_now->tv_sec - thr->last.tv_sec > WATCHDOG_DEAD_TIME)) { cgpu->status = LIFE_DEAD; applog(LOG_ERR, "%s: Not responded for more than 10 minutes, declaring DEAD!", dev_str); cgtime(&thr->sick); dev_error(cgpu, REASON_DEV_DEAD_IDLE_600); run_cmd(cmd_dead); } else if (tvp_now->tv_sec - thr->sick.tv_sec > 60 && (cgpu->status == LIFE_SICK || cgpu->status == LIFE_DEAD)) { /* Attempt to restart a GPU that's sick or dead once every minute */ cgtime(&thr->sick); #ifdef HAVE_ADL if (adl_active && cgpu->has_adl && gpu_activity(gpu) > 50) { /* Again do not attempt to restart a device that may have hard hung */ } else #endif if (opt_restart) reinit_device(cgpu); } } static void log_print_status(struct cgpu_info *cgpu) { char logline[255]; get_statline(logline, sizeof(logline), cgpu); applog(LOG_WARNING, "%s", logline); } void print_summary(void) { struct timeval diff; int hours, mins, secs, i; double utility, efficiency = 0.0; char xfer[17], bw[19]; int pool_secs; timersub(&total_tv_end, &total_tv_start, &diff); hours = diff.tv_sec / 3600; mins = (diff.tv_sec % 3600) / 60; secs = diff.tv_sec % 60; utility = total_accepted / total_secs * 60; efficiency = total_bytes_xfer ? total_diff_accepted * 2048. 
/ total_bytes_xfer : 0.0; applog(LOG_WARNING, "\nSummary of runtime statistics:\n"); applog(LOG_WARNING, "Started at %s", datestamp); if (total_pools == 1) applog(LOG_WARNING, "Pool: %s", pools[0]->rpc_url); #ifdef WANT_CPUMINE if (opt_n_threads) applog(LOG_WARNING, "CPU hasher algorithm used: %s", algo_names[opt_algo]); #endif applog(LOG_WARNING, "Runtime: %d hrs : %d mins : %d secs", hours, mins, secs); applog(LOG_WARNING, "Average hashrate: %.1f Megahash/s", total_mhashes_done / total_secs); applog(LOG_WARNING, "Solved blocks: %d", found_blocks); applog(LOG_WARNING, "Best share difficulty: %s", best_share); applog(LOG_WARNING, "Share submissions: %d", total_accepted + total_rejected); applog(LOG_WARNING, "Accepted shares: %d", total_accepted); applog(LOG_WARNING, "Rejected shares: %d + %d stale (%.2f%%)", total_rejected, total_stale, (float)(total_rejected + total_stale) / (float)(total_rejected + total_stale + total_accepted) ); applog(LOG_WARNING, "Accepted difficulty shares: %1.f", total_diff_accepted); applog(LOG_WARNING, "Rejected difficulty shares: %1.f", total_diff_rejected); applog(LOG_WARNING, "Hardware errors: %d", hw_errors); applog(LOG_WARNING, "Network transfer: %s (%s)", multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2, (float)total_bytes_rcvd, (float)total_bytes_sent), multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2, (float)(total_bytes_rcvd / total_secs), (float)(total_bytes_sent / total_secs))); applog(LOG_WARNING, "Efficiency (accepted shares * difficulty / 2 KB): %.2f", efficiency); applog(LOG_WARNING, "Utility (accepted shares / min): %.2f/min\n", utility); applog(LOG_WARNING, "Unable to get work from server occasions: %d", total_go); applog(LOG_WARNING, "Work items generated locally: %d", local_work); applog(LOG_WARNING, "Submitting work remotely delay occasions: %d", total_ro); applog(LOG_WARNING, "New blocks detected on network: %d\n", new_blocks); if (total_pools > 1) { for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; applog(LOG_WARNING, "Pool: %s", pool->rpc_url); if (pool->solved) applog(LOG_WARNING, "SOLVED %d BLOCK%s!", pool->solved, pool->solved > 1 ? "S" : ""); applog(LOG_WARNING, " Share submissions: %d", pool->accepted + pool->rejected); applog(LOG_WARNING, " Accepted shares: %d", pool->accepted); applog(LOG_WARNING, " Rejected shares: %d + %d stale (%.2f%%)", pool->rejected, pool->stale_shares, (float)(pool->rejected + pool->stale_shares) / (float)(pool->rejected + pool->stale_shares + pool->accepted) ); applog(LOG_WARNING, " Accepted difficulty shares: %1.f", pool->diff_accepted); applog(LOG_WARNING, " Rejected difficulty shares: %1.f", pool->diff_rejected); pool_secs = timer_elapsed(&pool->cgminer_stats.start_tv, NULL); applog(LOG_WARNING, " Network transfer: %s (%s)", multi_format_unit2(xfer, sizeof(xfer), true, "B", H2B_SPACED, " / ", 2, (float)pool->cgminer_pool_stats.net_bytes_received, (float)pool->cgminer_pool_stats.net_bytes_sent), multi_format_unit2(bw, sizeof(bw), true, "B/s", H2B_SPACED, " / ", 2, (float)(pool->cgminer_pool_stats.net_bytes_received / pool_secs), (float)(pool->cgminer_pool_stats.net_bytes_sent / pool_secs))); uint64_t pool_bytes_xfer = pool->cgminer_pool_stats.net_bytes_received + pool->cgminer_pool_stats.net_bytes_sent; efficiency = pool_bytes_xfer ? pool->diff_accepted * 2048. 
/ pool_bytes_xfer : 0.0; applog(LOG_WARNING, " Efficiency (accepted * difficulty / 2 KB): %.2f", efficiency); applog(LOG_WARNING, " Items worked on: %d", pool->works); applog(LOG_WARNING, " Unable to get work from server occasions: %d", pool->getfail_occasions); applog(LOG_WARNING, " Submitting work remotely delay occasions: %d\n", pool->remotefail_occasions); } } applog(LOG_WARNING, "Summary of per device statistics:\n"); for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = get_devices(i); if ((!cgpu->proc_id) && cgpu->procs > 1) { // Device summary line opt_show_procs = false; log_print_status(cgpu); opt_show_procs = true; } log_print_status(cgpu); } if (opt_shares) { applog(LOG_WARNING, "Mined %.0f accepted shares of %d requested\n", total_diff_accepted, opt_shares); if (opt_shares > total_diff_accepted) applog(LOG_WARNING, "WARNING - Mined only %.0f shares of %d requested.", total_diff_accepted, opt_shares); } applog(LOG_WARNING, " "); fflush(stderr); fflush(stdout); } void _bfg_clean_up(bool restarting) { #ifdef HAVE_OPENCL clear_adl(nDevs); #endif #ifdef HAVE_LIBUSB if (likely(have_libusb)) libusb_exit(NULL); #endif cgtime(&total_tv_end); #ifdef WIN32 timeEndPeriod(1); #endif if (!restarting) { /* Attempting to disable curses or print a summary during a * restart can lead to a deadlock. */ #ifdef HAVE_CURSES disable_curses(); #endif if (!opt_realquiet && successful_connect) print_summary(); } if (opt_n_threads) free(cpus); curl_global_cleanup(); #ifdef WIN32 WSACleanup(); #endif } void _quit(int status) { if (status) { const char *ev = getenv("__BFGMINER_SEGFAULT_ERRQUIT"); if (unlikely(ev && ev[0] && ev[0] != '0')) { int *p = NULL; // NOTE debugger can bypass with: p = &p *p = status; // Segfault, hopefully dumping core } } #if defined(unix) || defined(__APPLE__) if (forkpid > 0) { kill(forkpid, SIGTERM); forkpid = 0; } #endif exit(status); } #ifdef HAVE_CURSES char *curses_input(const char *query) { char *input; echo(); input = malloc(255); if (!input) quit(1, "Failed to malloc input"); leaveok(logwin, false); wlogprint("%s:\n", query); wgetnstr(logwin, input, 255); if (!strlen(input)) { free(input); input = NULL; } leaveok(logwin, true); noecho(); return input; } #endif static bool pools_active = false; static void *test_pool_thread(void *arg) { struct pool *pool = (struct pool *)arg; if (pool_active(pool, false)) { pool_tset(pool, &pool->lagging); pool_tclear(pool, &pool->idle); bool first_pool = false; cg_wlock(&control_lock); if (!pools_active) { currentpool = pool; if (pool->pool_no != 0) first_pool = true; pools_active = true; } cg_wunlock(&control_lock); if (unlikely(first_pool)) applog(LOG_NOTICE, "Switching to pool %d %s - first alive pool", pool->pool_no, pool->rpc_url); else applog(LOG_NOTICE, "Pool %d %s alive", pool->pool_no, pool->rpc_url); switch_pools(NULL); } else pool_died(pool); pool->testing = false; return NULL; } /* Always returns true that the pool details were added unless we are not * live, implying this is the only pool being added, so if no pools are * active it returns false. 
*/ bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass) { size_t siz; pool->rpc_url = url; pool->rpc_user = user; pool->rpc_pass = pass; siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2; pool->rpc_userpass = malloc(siz); if (!pool->rpc_userpass) quit(1, "Failed to malloc userpass"); snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass); pool->testing = true; pool->idle = true; enable_pool(pool); pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool); if (!live) { pthread_join(pool->test_thread, NULL); return pools_active; } return true; } #ifdef HAVE_CURSES static bool input_pool(bool live) { char *url = NULL, *user = NULL, *pass = NULL; struct pool *pool; bool ret = false; immedok(logwin, true); wlogprint("Input server details.\n"); url = curses_input("URL"); if (!url) goto out; user = curses_input("Username"); if (!user) goto out; pass = curses_input("Password"); if (!pass) pass = calloc(1, 1); pool = add_pool(); if (!detect_stratum(pool, url) && strncmp(url, "http://", 7) && strncmp(url, "https://", 8)) { char *httpinput; httpinput = malloc(256); if (!httpinput) quit(1, "Failed to malloc httpinput"); strcpy(httpinput, "http://"); strncat(httpinput, url, 248); free(url); url = httpinput; } ret = add_pool_details(pool, live, url, user, pass); out: immedok(logwin, false); if (!ret) { if (url) free(url); if (user) free(user); if (pass) free(pass); } return ret; } #endif #if defined(unix) || defined(__APPLE__) static void fork_monitor() { // Make a pipe: [readFD, writeFD] int pfd[2]; int r = pipe(pfd); if (r < 0) { perror("pipe - failed to create pipe for --monitor"); exit(1); } // Make stderr write end of pipe fflush(stderr); r = dup2(pfd[1], 2); if (r < 0) { perror("dup2 - failed to alias stderr to write end of pipe for --monitor"); exit(1); } r = close(pfd[1]); if (r < 0) { perror("close - failed to close write end of pipe for --monitor"); exit(1); } // Don't allow a dying monitor to kill the main process sighandler_t sr0 = signal(SIGPIPE, SIG_IGN); sighandler_t sr1 = signal(SIGPIPE, SIG_IGN); if (SIG_ERR == sr0 || SIG_ERR == sr1) { perror("signal - failed to edit signal mask for --monitor"); exit(1); } // Fork a child process forkpid = fork(); if (forkpid < 0) { perror("fork - failed to fork child process for --monitor"); exit(1); } // Child: launch monitor command if (0 == forkpid) { // Make stdin read end of pipe r = dup2(pfd[0], 0); if (r < 0) { perror("dup2 - in child, failed to alias read end of pipe to stdin for --monitor"); exit(1); } close(pfd[0]); if (r < 0) { perror("close - in child, failed to close read end of pipe for --monitor"); exit(1); } // Launch user specified command execl("/bin/bash", "/bin/bash", "-c", opt_stderr_cmd, (char*)NULL); perror("execl - in child failed to exec user specified command for --monitor"); exit(1); } // Parent: clean up unused fds and bail r = close(pfd[0]); if (r < 0) { perror("close - failed to close read end of pipe for --monitor"); exit(1); } } #endif // defined(unix) #ifdef HAVE_CURSES #ifdef USE_UNICODE static wchar_t select_unicode_char(const wchar_t *opt) { for ( ; *opt; ++opt) if (iswprint(*opt)) return *opt; return '?'; } #endif void enable_curses(void) { int x; __maybe_unused int y; lock_curses(); if (curses_active) { unlock_curses(); return; } #ifdef USE_UNICODE if (use_unicode) { setlocale(LC_CTYPE, ""); if (iswprint(0xb0)) have_unicode_degrees = true; unicode_micro = select_unicode_char(L"\xb5\u03bcu"); } #endif mainwin = initscr(); start_color(); #if 
defined(PDCURSES) || defined(NCURSES_VERSION) if (ERR != use_default_colors()) default_bgcolor = -1; #endif if (has_colors() && ERR != init_pair(1, COLOR_WHITE, COLOR_BLUE)) { menu_attr = COLOR_PAIR(1); if (ERR != init_pair(2, COLOR_RED, default_bgcolor)) attr_bad |= COLOR_PAIR(2); } keypad(mainwin, true); getmaxyx(mainwin, y, x); statuswin = newwin(logstart, x, 0, 0); leaveok(statuswin, true); // For whatever reason, PDCurses crashes if the logwin is initialized to height y-logcursor // We resize the window later anyway, so just start it off at 1 :) logwin = newwin(1, 0, logcursor, 0); idlok(logwin, true); scrollok(logwin, true); leaveok(logwin, true); cbreak(); noecho(); nonl(); curses_active = true; statusy = logstart; unlock_curses(); } #endif /* TODO: fix need a dummy CPU device_drv even if no support for CPU mining */ #ifndef WANT_CPUMINE struct device_drv cpu_drv; struct device_drv cpu_drv = { .name = "CPU", }; #endif static int cgminer_id_count = 0; static int device_line_id_count; void register_device(struct cgpu_info *cgpu) { cgpu->deven = DEV_ENABLED; wr_lock(&devices_lock); devices[cgpu->cgminer_id = cgminer_id_count++] = cgpu; wr_unlock(&devices_lock); if (!cgpu->proc_id) cgpu->device_line_id = device_line_id_count++; mining_threads += cgpu->threads ?: 1; #ifdef HAVE_CURSES adj_width(mining_threads, &dev_width); #endif rwlock_init(&cgpu->qlock); cgpu->queued_work = NULL; } struct _cgpu_devid_counter { char name[4]; int lastid; UT_hash_handle hh; }; void renumber_cgpu(struct cgpu_info *cgpu) { static struct _cgpu_devid_counter *devids = NULL; struct _cgpu_devid_counter *d; HASH_FIND_STR(devids, cgpu->drv->name, d); if (d) cgpu->device_id = ++d->lastid; else { d = malloc(sizeof(*d)); memcpy(d->name, cgpu->drv->name, sizeof(d->name)); cgpu->device_id = d->lastid = 0; HASH_ADD_STR(devids, name, d); } } static bool my_blkmaker_sha256_callback(void *digest, const void *buffer, size_t length) { sha256(buffer, length, digest); return true; } #ifndef HAVE_PTHREAD_CANCEL extern void setup_pthread_cancel_workaround(); extern struct sigaction pcwm_orig_term_handler; #endif bool bfg_need_detect_rescan; extern void probe_device(struct lowlevel_device_info *); static void drv_detect_all() { rescan: bfg_need_detect_rescan = false; #ifdef HAVE_BFG_LOWLEVEL struct lowlevel_device_info * const infolist = lowlevel_scan(), *info, *infotmp; LL_FOREACH_SAFE(infolist, info, infotmp) probe_device(info); LL_FOREACH_SAFE(infolist, info, infotmp) pthread_join(info->probe_pth, NULL); #endif struct driver_registration *reg, *tmp; const int algomatch = opt_scrypt ? 
POW_SCRYPT : POW_SHA256D; BFG_FOREACH_DRIVER_BY_PRIORITY(reg, tmp) { const struct device_drv * const drv = reg->drv; const supported_algos_t algos = drv->supported_algos ?: POW_SHA256D; if (0 == (algos & algomatch) || !drv->drv_detect) continue; drv->drv_detect(); } #ifdef HAVE_BFG_LOWLEVEL lowlevel_scan_free(); #endif if (bfg_need_detect_rescan) { applog(LOG_DEBUG, "Device rescan requested"); goto rescan; } } static void allocate_cgpu(struct cgpu_info *cgpu, unsigned int *kp) { struct thr_info *thr; int j; struct device_drv *api = cgpu->drv; if (!cgpu->devtype) cgpu->devtype = "PGA"; cgpu->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; int threadobj = cgpu->threads; if (!threadobj) // Create a fake thread object to handle hashmeter etc threadobj = 1; cgpu->thr = calloc(threadobj + 1, sizeof(*cgpu->thr)); cgpu->thr[threadobj] = NULL; cgpu->status = LIFE_INIT; if (opt_devices_enabled_list) { struct string_elist *enablestr_elist; cgpu->deven = DEV_DISABLED; DL_FOREACH(opt_devices_enabled_list, enablestr_elist) { const char * const enablestr = enablestr_elist->string; if (cgpu_match(enablestr, cgpu)) { cgpu->deven = DEV_ENABLED; break; } } } cgpu->max_hashes = 0; // Setup thread structs before starting any of the threads, in case they try to interact for (j = 0; j < threadobj; ++j, ++*kp) { thr = get_thread(*kp); thr->id = *kp; thr->cgpu = cgpu; thr->device_thread = j; thr->work_restart_notifier[1] = INVSOCK; thr->mutex_request[1] = INVSOCK; thr->_job_transition_in_progress = true; timerclear(&thr->tv_morework); thr->scanhash_working = true; thr->hashes_done = 0; timerclear(&thr->tv_hashes_done); cgtime(&thr->tv_lastupdate); thr->tv_poll.tv_sec = -1; thr->_max_nonce = api->can_limit_work ? api->can_limit_work(thr) : 0xffffffff; cgpu->thr[j] = thr; } if (!cgpu->device->threads) notifier_init_invalid(cgpu->thr[0]->notifier); else if (!cgpu->threads) memcpy(&cgpu->thr[0]->notifier, &cgpu->device->thr[0]->notifier, sizeof(cgpu->thr[0]->notifier)); else for (j = 0; j < cgpu->threads; ++j) { thr = cgpu->thr[j]; notifier_init(thr->notifier); } } static void start_cgpu(struct cgpu_info *cgpu) { struct thr_info *thr; int j; for (j = 0; j < cgpu->threads; ++j) { thr = cgpu->thr[j]; /* Enable threads for devices set not to mine but disable * their queue in case we wish to enable them later */ if (cgpu->drv->thread_prepare && !cgpu->drv->thread_prepare(thr)) continue; thread_reportout(thr); if (unlikely(thr_info_create(thr, NULL, miner_thread, thr))) quit(1, "thread %d create failed", thr->id); notifier_wake(thr->notifier); } if (cgpu->deven == DEV_ENABLED) proc_enable(cgpu); } static void _scan_serial(void *p) { const char *s = p; struct string_elist *iter, *tmp; struct string_elist *orig_scan_devices = scan_devices; if (s) { // Make temporary scan_devices list scan_devices = NULL; string_elist_add("noauto", &scan_devices); add_serial(s); } drv_detect_all(); if (s) { DL_FOREACH_SAFE(scan_devices, iter, tmp) { string_elist_del(&scan_devices, iter); } scan_devices = orig_scan_devices; } } #ifdef HAVE_BFG_LOWLEVEL static bool _probe_device_match(const struct lowlevel_device_info * const info, const char * const ser) { if (!(false || (info->serial && !strcasecmp(ser, info->serial)) || (info->path && !strcasecmp(ser, info->path )) || (info->devid && !strcasecmp(ser, info->devid )) )) { char *devid = devpath_to_devid(ser); if (!devid) return false; const bool different = strcmp(info->devid, devid); free(devid); if (different) return false; } return true; } static const struct device_drv 
*_probe_device_find_drv(const char * const _dname, const size_t dnamelen) { struct driver_registration *dreg; char dname[dnamelen]; int i; for (i = 0; i < dnamelen; ++i) dname[i] = tolower(_dname[i]); BFG_FIND_DRV_BY_DNAME(dreg, dname, dnamelen); if (!dreg) { for (i = 0; i < dnamelen; ++i) dname[i] = toupper(_dname[i]); BFG_FIND_DRV_BY_NAME(dreg, dname, dnamelen); if (!dreg) return NULL; } return dreg->drv; } static bool _probe_device_internal(struct lowlevel_device_info * const info, const char * const dname, const size_t dnamelen) { const struct device_drv * const drv = _probe_device_find_drv(dname, dnamelen); if (!(drv && drv->lowl_probe)) return false; return drv->lowl_probe(info); } static void *probe_device_thread(void *p) { struct lowlevel_device_info * const infolist = p; struct lowlevel_device_info *info = infolist; { char threadname[5 + strlen(info->devid) + 1]; sprintf(threadname, "probe_%s", info->devid); RenameThread(threadname); } // If already in use, ignore if (bfg_claim_any(NULL, NULL, info->devid)) applogr(NULL, LOG_DEBUG, "%s: \"%s\" already in use", __func__, info->product); // if lowlevel device matches specific user assignment, probe requested driver(s) struct string_elist *sd_iter, *sd_tmp; struct driver_registration *dreg, *dreg_tmp; DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp) { const char * const dname = sd_iter->string; const char * const colon = strpbrk(dname, ":@"); if (!(colon && colon != dname)) continue; const char * const ser = &colon[1]; LL_FOREACH2(infolist, info, same_devid_next) { if (!_probe_device_match(info, ser)) continue; const size_t dnamelen = (colon - dname); if (_probe_device_internal(info, dname, dnamelen)) return NULL; } } // probe driver(s) with auto enabled and matching VID/PID/Product/etc of device BFG_FOREACH_DRIVER_BY_PRIORITY(dreg, dreg_tmp) { const struct device_drv * const drv = dreg->drv; // Check for "noauto" flag // NOTE: driver-specific configuration overrides general bool doauto = true; DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp) { const char * const dname = sd_iter->string; // NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable const char *colon = strchr(dname, ':'); if (!colon) colon = &dname[-1]; if (strcasecmp("noauto", &colon[1]) && strcasecmp("auto", &colon[1])) continue; const ssize_t dnamelen = (colon - dname); if (dnamelen >= 0 && _probe_device_find_drv(dname, dnamelen) != drv) continue; doauto = (tolower(colon[1]) == 'a'); if (dnamelen != -1) break; } if (doauto && drv->lowl_match) { LL_FOREACH2(infolist, info, same_devid_next) { if (!drv->lowl_match(info)) continue; if (drv->lowl_probe(info)) return NULL; } } } // probe driver(s) with 'all' enabled DL_FOREACH_SAFE(scan_devices, sd_iter, sd_tmp) { const char * const dname = sd_iter->string; // NOTE: Only checking flags here, NOT path/serial, so @ is unacceptable const char * const colon = strchr(dname, ':'); if (!colon) { LL_FOREACH2(infolist, info, same_devid_next) { if ( #ifdef NEED_BFG_LOWL_VCOM (info->lowl == &lowl_vcom && !strcasecmp(dname, "all")) || #endif _probe_device_match(info, (dname[0] == '@') ? 
&dname[1] : dname)) { BFG_FOREACH_DRIVER_BY_PRIORITY(dreg, dreg_tmp) { const struct device_drv * const drv = dreg->drv; if (drv->lowl_probe_by_name_only) continue; if (!drv->lowl_probe) continue; if (drv->lowl_probe(info)) return NULL; } break; } } continue; } if (strcasecmp(&colon[1], "all")) continue; const size_t dnamelen = (colon - dname); LL_FOREACH2(infolist, info, same_devid_next) { if (_probe_device_internal(info, dname, dnamelen)) return NULL; } } return NULL; } void probe_device(struct lowlevel_device_info * const info) { pthread_create(&info->probe_pth, NULL, probe_device_thread, info); } #endif int create_new_cgpus(void (*addfunc)(void*), void *arg) { static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; int devcount, i, mining_threads_new = 0; unsigned int k; struct cgpu_info *cgpu; struct thr_info *thr; void *p; char *dummy = "\0"; mutex_lock(&mutex); devcount = total_devices; addfunc(arg); if (!total_devices_new) goto out; wr_lock(&devices_lock); p = realloc(devices, sizeof(struct cgpu_info *) * (total_devices + total_devices_new + 1)); if (unlikely(!p)) { wr_unlock(&devices_lock); applog(LOG_ERR, "scan_serial: realloc failed trying to grow devices array"); goto out; } devices = p; wr_unlock(&devices_lock); for (i = 0; i < total_devices_new; ++i) { cgpu = devices_new[i]; mining_threads_new += cgpu->threads ?: 1; } wr_lock(&mining_thr_lock); mining_threads_new += mining_threads; p = realloc(mining_thr, sizeof(struct thr_info *) * mining_threads_new); if (unlikely(!p)) { wr_unlock(&mining_thr_lock); applog(LOG_ERR, "scan_serial: realloc failed trying to grow mining_thr"); goto out; } mining_thr = p; wr_unlock(&mining_thr_lock); for (i = mining_threads; i < mining_threads_new; ++i) { mining_thr[i] = calloc(1, sizeof(*thr)); if (!mining_thr[i]) { applog(LOG_ERR, "scan_serial: Failed to calloc mining_thr[%d]", i); for ( ; --i >= mining_threads; ) free(mining_thr[i]); goto out; } } k = mining_threads; for (i = 0; i < total_devices_new; ++i) { cgpu = devices_new[i]; load_temp_config_cgpu(cgpu, &dummy, &dummy); allocate_cgpu(cgpu, &k); start_cgpu(cgpu); register_device(cgpu); ++total_devices; } #ifdef HAVE_CURSES switch_logsize(); #endif out: total_devices_new = 0; devcount = total_devices - devcount; mutex_unlock(&mutex); return devcount; } int scan_serial(const char *s) { return create_new_cgpus(_scan_serial, (void*)s); } static void probe_pools(void) { int i; for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; pool->testing = true; pthread_create(&pool->test_thread, NULL, test_pool_thread, (void *)pool); } } static void raise_fd_limits(void) { #ifdef HAVE_SETRLIMIT struct rlimit fdlimit; unsigned long old_soft_limit; char frombuf[0x10] = "unlimited"; char hardbuf[0x10] = "unlimited"; if (getrlimit(RLIMIT_NOFILE, &fdlimit)) applogr(, LOG_DEBUG, "setrlimit: Failed to getrlimit(RLIMIT_NOFILE)"); old_soft_limit = fdlimit.rlim_cur; if (fdlimit.rlim_max > FD_SETSIZE || fdlimit.rlim_max == RLIM_INFINITY) fdlimit.rlim_cur = FD_SETSIZE; else fdlimit.rlim_cur = fdlimit.rlim_max; if (fdlimit.rlim_max != RLIM_INFINITY) snprintf(hardbuf, sizeof(hardbuf), "%lu", (unsigned long)fdlimit.rlim_max); if (old_soft_limit != RLIM_INFINITY) snprintf(frombuf, sizeof(frombuf), "%lu", old_soft_limit); if (fdlimit.rlim_cur == old_soft_limit) applogr(, LOG_DEBUG, "setrlimit: Soft fd limit not being changed from %lu (FD_SETSIZE=%lu; hard limit=%s)", old_soft_limit, (unsigned long)FD_SETSIZE, hardbuf); if (setrlimit(RLIMIT_NOFILE, &fdlimit)) applogr(, LOG_DEBUG, "setrlimit: Failed to change 
soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)", frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf); applog(LOG_DEBUG, "setrlimit: Changed soft fd limit from %s to %lu (FD_SETSIZE=%lu; hard limit=%s)", frombuf, (unsigned long)fdlimit.rlim_cur, (unsigned long)FD_SETSIZE, hardbuf); #else applog(LOG_DEBUG, "setrlimit: Not supported by platform"); #endif } extern void bfg_init_threadlocal(); extern void stratumsrv_start(); int main(int argc, char *argv[]) { struct sigaction handler; struct thr_info *thr; struct block *block; unsigned int k; int i; char *s; #ifdef WIN32 LoadLibrary("backtrace.dll"); #endif blkmk_sha256_impl = my_blkmaker_sha256_callback; bfg_init_threadlocal(); #ifndef HAVE_PTHREAD_CANCEL setup_pthread_cancel_workaround(); #endif bfg_init_checksums(); #ifdef WIN32 { WSADATA wsa; i = WSAStartup(MAKEWORD(2, 2), &wsa); if (i) quit(1, "Failed to initialise Winsock: %s", bfg_strerror(i, BST_SOCKET)); } #endif /* This dangerous functions tramples random dynamically allocated * variables so do it before anything at all */ if (unlikely(curl_global_init(CURL_GLOBAL_ALL))) quit(1, "Failed to curl_global_init"); initial_args = malloc(sizeof(char *) * (argc + 1)); for (i = 0; i < argc; i++) initial_args[i] = strdup(argv[i]); initial_args[argc] = NULL; mutex_init(&hash_lock); mutex_init(&console_lock); cglock_init(&control_lock); mutex_init(&stats_lock); mutex_init(&sharelog_lock); cglock_init(&ch_lock); mutex_init(&sshare_lock); rwlock_init(&blk_lock); rwlock_init(&netacc_lock); rwlock_init(&mining_thr_lock); rwlock_init(&devices_lock); mutex_init(&lp_lock); if (unlikely(pthread_cond_init(&lp_cond, NULL))) quit(1, "Failed to pthread_cond_init lp_cond"); if (unlikely(pthread_cond_init(&gws_cond, NULL))) quit(1, "Failed to pthread_cond_init gws_cond"); notifier_init(submit_waiting_notifier); snprintf(packagename, sizeof(packagename), "%s %s", PACKAGE, VERSION); #ifdef WANT_CPUMINE init_max_name_len(); #endif handler.sa_handler = &sighandler; handler.sa_flags = 0; sigemptyset(&handler.sa_mask); #ifdef HAVE_PTHREAD_CANCEL sigaction(SIGTERM, &handler, &termhandler); #else // Need to let pthread_cancel emulation handle SIGTERM first termhandler = pcwm_orig_term_handler; pcwm_orig_term_handler = handler; #endif sigaction(SIGINT, &handler, &inthandler); #ifndef WIN32 signal(SIGPIPE, SIG_IGN); #else timeBeginPeriod(1); #endif opt_kernel_path = CGMINER_PREFIX; cgminer_path = alloca(PATH_MAX); s = strdup(argv[0]); strcpy(cgminer_path, dirname(s)); free(s); strcat(cgminer_path, "/"); #ifdef WANT_CPUMINE // Hack to make cgminer silent when called recursively on WIN32 int skip_to_bench = 0; #if defined(WIN32) char buf[32]; if (GetEnvironmentVariable("BFGMINER_BENCH_ALGO", buf, 16)) skip_to_bench = 1; if (GetEnvironmentVariable("CGMINER_BENCH_ALGO", buf, 16)) skip_to_bench = 1; #endif // defined(WIN32) #endif devcursor = 8; logstart = devcursor; logcursor = logstart; block = calloc(sizeof(struct block), 1); if (unlikely(!block)) quit (1, "main OOM"); for (i = 0; i < 36; i++) strcat(block->hash, "0"); HASH_ADD_STR(blocks, hash, block); strcpy(current_block, block->hash); mutex_init(&submitting_lock); #ifdef HAVE_OPENCL memset(gpus, 0, sizeof(gpus)); for (i = 0; i < MAX_GPUDEVICES; i++) gpus[i].dynamic = true; #endif schedstart.tm.tm_sec = 1; schedstop .tm.tm_sec = 1; /* parse command line */ opt_register_table(opt_config_table, "Options for both config file and command line"); opt_register_table(opt_cmdline_table, "Options for command line only"); opt_parse(&argc, 
argv, applog_and_exit); if (argc != 1) quit(1, "Unexpected extra commandline arguments"); if (!config_loaded) load_default_config(); #ifndef HAVE_PTHREAD_CANCEL // Can't do this any earlier, or config isn't loaded applog(LOG_DEBUG, "pthread_cancel workaround in use"); #endif #ifdef HAVE_PWD_H struct passwd *user_info = NULL; if (opt_setuid != NULL) { if ((user_info = getpwnam(opt_setuid)) == NULL) { quit(1, "Unable to find setuid user information"); } } #endif #ifdef HAVE_CHROOT if (chroot_dir != NULL) { #ifdef HAVE_PWD_H if (user_info == NULL && getuid() == 0) { applog(LOG_WARNING, "Running as root inside chroot"); } #endif if (chroot(chroot_dir) != 0) { quit(1, "Unable to chroot"); } if (chdir("/")) quit(1, "Unable to chdir to chroot"); } #endif #ifdef HAVE_PWD_H if (user_info != NULL) { if (setgid((*user_info).pw_gid) != 0) quit(1, "Unable to setgid"); if (setuid((*user_info).pw_uid) != 0) quit(1, "Unable to setuid"); } #endif raise_fd_limits(); if (opt_benchmark) { struct pool *pool; if (opt_scrypt) quit(1, "Cannot use benchmark mode with scrypt"); want_longpoll = false; pool = add_pool(); pool->rpc_url = malloc(255); strcpy(pool->rpc_url, "Benchmark"); pool->rpc_user = pool->rpc_url; pool->rpc_pass = pool->rpc_url; enable_pool(pool); pool->idle = false; successful_connect = true; } if (opt_unittest) { test_cgpu_match(); test_intrange(); test_decimal_width(); utf8_test(); } #ifdef HAVE_CURSES if (opt_realquiet || opt_display_devs) use_curses = false; setlocale(LC_ALL, "C"); if (use_curses) enable_curses(); #endif #ifdef HAVE_LIBUSB int err = libusb_init(NULL); if (err) applog(LOG_WARNING, "libusb_init() failed err %d", err); else have_libusb = true; #endif applog(LOG_WARNING, "Started %s", packagename); if (cnfbuf) { applog(LOG_NOTICE, "Loaded configuration file %s", cnfbuf); switch (fileconf_load) { case 0: applog(LOG_WARNING, "Fatal JSON error in configuration file."); applog(LOG_WARNING, "Configuration file could not be used."); break; case -1: applog(LOG_WARNING, "Error in configuration file, partially loaded."); if (use_curses) applog(LOG_WARNING, "Start BFGMiner with -T to see what failed to load."); break; default: break; } free(cnfbuf); cnfbuf = NULL; } i = strlen(opt_kernel_path) + 2; char __kernel_path[i]; snprintf(__kernel_path, i, "%s/", opt_kernel_path); opt_kernel_path = __kernel_path; if (want_per_device_stats) opt_log_output = true; #ifdef WANT_CPUMINE #ifdef USE_SCRYPT if (opt_scrypt) set_scrypt_algo(&opt_algo); else #endif if (0 <= opt_bench_algo) { double rate = bench_algo_stage3(opt_bench_algo); if (!skip_to_bench) printf("%.5f (%s)\n", rate, algo_names[opt_bench_algo]); else { // Write result to shared memory for parent #if defined(WIN32) char unique_name[64]; if (GetEnvironmentVariable("BFGMINER_SHARED_MEM", unique_name, 32) || GetEnvironmentVariable("CGMINER_SHARED_MEM", unique_name, 32)) { HANDLE map_handle = CreateFileMapping( INVALID_HANDLE_VALUE, // use paging file NULL, // default security attributes PAGE_READWRITE, // read/write access 0, // size: high 32-bits 4096, // size: low 32-bits unique_name // name of map object ); if (NULL != map_handle) { void *shared_mem = MapViewOfFile( map_handle, // object to map view of FILE_MAP_WRITE, // read/write access 0, // high offset: map from 0, // low offset: beginning 0 // default: map entire file ); if (NULL != shared_mem) CopyMemory(shared_mem, &rate, sizeof(rate)); (void)UnmapViewOfFile(shared_mem); } (void)CloseHandle(map_handle); } #endif } exit(0); } #endif bfg_devapi_init(); drv_detect_all(); total_devices = 
total_devices_new; devices = devices_new; total_devices_new = 0; devices_new = NULL; if (opt_display_devs) { int devcount = 0; applog(LOG_ERR, "Devices detected:"); for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = devices[i]; char buf[0x100]; if (cgpu->device != cgpu) continue; if (cgpu->name) snprintf(buf, sizeof(buf), " %s", cgpu->name); else if (cgpu->dev_manufacturer) snprintf(buf, sizeof(buf), " %s by %s", (cgpu->dev_product ?: "Device"), cgpu->dev_manufacturer); else if (cgpu->dev_product) snprintf(buf, sizeof(buf), " %s", cgpu->dev_product); else strcpy(buf, " Device"); tailsprintf(buf, sizeof(buf), " (driver=%s; procs=%d", cgpu->drv->dname, cgpu->procs); if (cgpu->dev_serial) tailsprintf(buf, sizeof(buf), "; serial=%s", cgpu->dev_serial); if (cgpu->device_path) tailsprintf(buf, sizeof(buf), "; path=%s", cgpu->device_path); tailsprintf(buf, sizeof(buf), ")"); _applog(LOG_NOTICE, buf); ++devcount; } quit(0, "%d devices listed", devcount); } mining_threads = 0; for (i = 0; i < total_devices; ++i) register_device(devices[i]); if (!total_devices) { applog(LOG_WARNING, "No devices detected!"); if (use_curses) applog(LOG_WARNING, "Waiting for devices; press 'M+' to add, or 'Q' to quit"); else applog(LOG_WARNING, "Waiting for devices"); } load_temp_config(); #ifdef HAVE_CURSES switch_logsize(); #endif if (!total_pools) { applog(LOG_WARNING, "Need to specify at least one pool server."); #ifdef HAVE_CURSES if (!use_curses || !input_pool(false)) #endif quit(1, "Pool setup failed"); } for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; size_t siz; pool->cgminer_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; pool->cgminer_pool_stats.getwork_wait_min.tv_sec = MIN_SEC_UNSET; if (!pool->rpc_url) quit(1, "No URI supplied for pool %u", i); if (!pool->rpc_userpass) { if (!pool->rpc_user || !pool->rpc_pass) quit(1, "No login credentials supplied for pool %u %s", i, pool->rpc_url); siz = strlen(pool->rpc_user) + strlen(pool->rpc_pass) + 2; pool->rpc_userpass = malloc(siz); if (!pool->rpc_userpass) quit(1, "Failed to malloc userpass"); snprintf(pool->rpc_userpass, siz, "%s:%s", pool->rpc_user, pool->rpc_pass); } } /* Set the currentpool to pool with priority 0 */ validate_pool_priorities(); for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; if (!pool->prio) currentpool = pool; } #ifdef HAVE_SYSLOG_H if (use_syslog) openlog(PACKAGE, LOG_PID, LOG_USER); #endif #if defined(unix) || defined(__APPLE__) if (opt_stderr_cmd) fork_monitor(); #endif // defined(unix) mining_thr = calloc(mining_threads, sizeof(thr)); if (!mining_thr) quit(1, "Failed to calloc mining_thr"); for (i = 0; i < mining_threads; i++) { mining_thr[i] = calloc(1, sizeof(*thr)); if (!mining_thr[i]) quit(1, "Failed to calloc mining_thr[%d]", i); } total_control_threads = 6; control_thr = calloc(total_control_threads, sizeof(*thr)); if (!control_thr) quit(1, "Failed to calloc control_thr"); gwsched_thr_id = 0; /* Create a unique get work queue */ getq = tq_new(); if (!getq) quit(1, "Failed to create getq"); /* We use the getq mutex as the staged lock */ stgd_lock = &getq->mutex; if (opt_benchmark) goto begin_bench; for (i = 0; i < total_pools; i++) { struct pool *pool = pools[i]; enable_pool(pool); pool->idle = true; } applog(LOG_NOTICE, "Probing for an alive pool"); do { bool still_testing; int i; /* Look for at least one active pool before starting */ probe_pools(); do { sleep(1); if (pools_active) break; still_testing = false; for (int i = 0; i < total_pools; ++i) if (pools[i]->testing) 
still_testing = true; } while (still_testing); if (!pools_active) { applog(LOG_ERR, "No servers were found that could be used to get work from."); applog(LOG_ERR, "Please check the details from the list below of the servers you have input"); applog(LOG_ERR, "Most likely you have input the wrong URL, forgotten to add a port, or have not set up workers"); for (i = 0; i < total_pools; i++) { struct pool *pool; pool = pools[i]; applog(LOG_WARNING, "Pool: %d URL: %s User: %s Password: %s", i, pool->rpc_url, pool->rpc_user, pool->rpc_pass); } #ifdef HAVE_CURSES if (use_curses) { halfdelay(150); applog(LOG_ERR, "Press any key to exit, or BFGMiner will try again in 15s."); if (getch() != ERR) quit(0, "No servers could be used! Exiting."); cbreak(); } else #endif quit(0, "No servers could be used! Exiting."); } } while (!pools_active); #ifdef USE_SCRYPT if (detect_algo == 1 && !opt_scrypt) { applog(LOG_NOTICE, "Detected scrypt algorithm"); opt_scrypt = true; } #endif detect_algo = 0; begin_bench: total_mhashes_done = 0; for (i = 0; i < total_devices; i++) { struct cgpu_info *cgpu = devices[i]; cgpu->rolling = cgpu->total_mhashes = 0; } cgtime(&total_tv_start); cgtime(&total_tv_end); miner_started = total_tv_start; time_t miner_start_ts = time(NULL); if (schedstart.tm.tm_sec) localtime_r(&miner_start_ts, &schedstart.tm); if (schedstop.tm.tm_sec) localtime_r(&miner_start_ts, &schedstop .tm); get_datestamp(datestamp, sizeof(datestamp), miner_start_ts); // Initialise processors and threads k = 0; for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = devices[i]; allocate_cgpu(cgpu, &k); } // Start threads for (i = 0; i < total_devices; ++i) { struct cgpu_info *cgpu = devices[i]; start_cgpu(cgpu); } #ifdef HAVE_OPENCL for (i = 0; i < nDevs; i++) pause_dynamic_threads(i); #endif #ifdef WANT_CPUMINE applog(LOG_INFO, "%d cpu miner threads started, " "using SHA256 '%s' algorithm.", opt_n_threads, algo_names[opt_algo]); #endif cgtime(&total_tv_start); cgtime(&total_tv_end); { pthread_t submit_thread; if (unlikely(pthread_create(&submit_thread, NULL, submit_work_thread, NULL))) quit(1, "submit_work thread create failed"); } watchpool_thr_id = 1; thr = &control_thr[watchpool_thr_id]; /* start watchpool thread */ if (thr_info_create(thr, NULL, watchpool_thread, NULL)) quit(1, "watchpool thread create failed"); pthread_detach(thr->pth); watchdog_thr_id = 2; thr = &control_thr[watchdog_thr_id]; /* start watchdog thread */ if (thr_info_create(thr, NULL, watchdog_thread, NULL)) quit(1, "watchdog thread create failed"); pthread_detach(thr->pth); #ifdef HAVE_OPENCL /* Create reinit gpu thread */ gpur_thr_id = 3; thr = &control_thr[gpur_thr_id]; thr->q = tq_new(); if (!thr->q) quit(1, "tq_new failed for gpur_thr_id"); if (thr_info_create(thr, NULL, reinit_gpu, thr)) quit(1, "reinit_gpu thread create failed"); #endif /* Create API socket thread */ api_thr_id = 4; thr = &control_thr[api_thr_id]; if (thr_info_create(thr, NULL, api_thread, thr)) quit(1, "API thread create failed"); #ifdef USE_LIBMICROHTTPD if (httpsrv_port != -1) httpsrv_start(httpsrv_port); #endif #ifdef USE_LIBEVENT if (stratumsrv_port != -1) stratumsrv_start(); #endif #ifdef HAVE_CURSES /* Create curses input thread for keyboard input. Create this last so * that we know all threads are created since this can call kill_work * to try and shut down ll previous threads. 
*/ input_thr_id = 5; thr = &control_thr[input_thr_id]; if (thr_info_create(thr, NULL, input_thread, thr)) quit(1, "input thread create failed"); pthread_detach(thr->pth); #endif /* Just to be sure */ if (total_control_threads != 6) quit(1, "incorrect total_control_threads (%d) should be 6", total_control_threads); /* Once everything is set up, main() becomes the getwork scheduler */ while (42) { int ts, max_staged = opt_queue; struct pool *pool, *cp; bool lagging = false; struct curl_ent *ce; struct work *work; cp = current_pool(); /* If the primary pool is a getwork pool and cannot roll work, * try to stage one extra work per mining thread */ if (!pool_localgen(cp) && !staged_rollable) max_staged += mining_threads; mutex_lock(stgd_lock); ts = __total_staged(); if (!pool_localgen(cp) && !ts && !opt_fail_only) lagging = true; /* Wait until hash_pop tells us we need to create more work */ if (ts > max_staged) { staged_full = true; pthread_cond_wait(&gws_cond, stgd_lock); ts = __total_staged(); } mutex_unlock(stgd_lock); if (ts > max_staged) continue; work = make_work(); if (lagging && !pool_tset(cp, &cp->lagging)) { applog(LOG_WARNING, "Pool %d not providing work fast enough", cp->pool_no); cp->getfail_occasions++; total_go++; } pool = select_pool(lagging); retry: if (pool->has_stratum) { while (!pool->stratum_active || !pool->stratum_notify) { struct pool *altpool = select_pool(true); if (altpool == pool && pool->has_stratum) cgsleep_ms(5000); pool = altpool; goto retry; } gen_stratum_work(pool, work); applog(LOG_DEBUG, "Generated stratum work"); stage_work(work); continue; } if (pool->last_work_copy) { mutex_lock(&pool->last_work_lock); struct work *last_work = pool->last_work_copy; if (!last_work) {} else if (can_roll(last_work) && should_roll(last_work)) { struct timeval tv_now; cgtime(&tv_now); free_work(work); work = make_clone(pool->last_work_copy); mutex_unlock(&pool->last_work_lock); roll_work(work); applog(LOG_DEBUG, "Generated work from latest GBT job in get_work_thread with %d seconds left", (int)blkmk_time_left(work->tmpl, tv_now.tv_sec)); stage_work(work); continue; } else if (last_work->tmpl && pool->proto == PLP_GETBLOCKTEMPLATE && blkmk_work_left(last_work->tmpl) > (unsigned long)mining_threads) { // Don't free last_work_copy, since it is used to detect that the upstream provides plenty of work per template } else { free_work(last_work); pool->last_work_copy = NULL; } mutex_unlock(&pool->last_work_lock); } if (clone_available()) { applog(LOG_DEBUG, "Cloned getwork work"); free_work(work); continue; } if (opt_benchmark) { get_benchmark_work(work); applog(LOG_DEBUG, "Generated benchmark work"); stage_work(work); continue; } work->pool = pool; ce = pop_curl_entry3(pool, 2); /* obtain new work from bitcoin via JSON-RPC */ if (!get_upstream_work(work, ce->curl)) { struct pool *next_pool; /* Make sure the pool just hasn't stopped serving * requests but is up as we'll keep hammering it */ push_curl_entry(ce, pool); ++pool->seq_getfails; pool_died(pool); next_pool = select_pool(!opt_fail_only); if (pool == next_pool) { applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, retrying in 5s", pool->pool_no); cgsleep_ms(5000); } else { applog(LOG_DEBUG, "Pool %d json_rpc_call failed on get work, failover activated", pool->pool_no); pool = next_pool; } goto retry; } if (ts >= max_staged) pool_tclear(pool, &pool->lagging); if (pool_tclear(pool, &pool->idle)) pool_resus(pool); applog(LOG_DEBUG, "Generated getwork work"); stage_work(work); push_curl_entry(ce, pool); } return 0; }
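/* Editor's illustrative sketch -- not part of the bfgminer sources above. The
 * getwork scheduler in main() stages work until __total_staged() exceeds
 * max_staged, then blocks on gws_cond (reusing the getq mutex as stgd_lock)
 * until hash_pop signals that a consumer has taken staged work. The following
 * standalone program shows that backpressure pattern in isolation; all names
 * here (MAX_STAGED, staged_count, consumer) are hypothetical, and the real
 * code's single-wait-then-recheck is simplified to a while loop. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_STAGED 4

static pthread_mutex_t stgd_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gws_cond = PTHREAD_COND_INITIALIZER;
static int staged_count;

static void *consumer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 20; ++i) {
		usleep(100 * 1000);
		pthread_mutex_lock(&stgd_lock);
		if (staged_count > 0) {
			--staged_count;                 /* like hash_pop taking staged work */
			pthread_cond_signal(&gws_cond); /* wake the scheduler if it is waiting */
		}
		pthread_mutex_unlock(&stgd_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t thr;
	pthread_create(&thr, NULL, consumer, NULL);
	for (int i = 0; i < 20; ++i) {
		pthread_mutex_lock(&stgd_lock);
		while (staged_count > MAX_STAGED)       /* queue full: wait for a pop */
			pthread_cond_wait(&gws_cond, &stgd_lock);
		++staged_count;                         /* like stage_work() */
		pthread_mutex_unlock(&stgd_lock);
		printf("staged item %d\n", i);
	}
	pthread_join(thr, NULL);
	return 0;
}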
bfgminer-bfgminer-3.10.0/miner.h000066400000000000000000001161241226556647300164630ustar00rootroot00000000000000/* * Copyright 2012-2013 Luke Dashjr * Copyright 2011-2013 Con Kolivas * Copyright 2012-2013 Andrew Smith * Copyright 2011 Glenn Francis Murray * Copyright 2010-2011 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef __MINER_H__ #define __MINER_H__ #include "config.h" #ifdef WIN32 #include #endif #include #include #include #include #include #include #include #include #include #if defined(WORDS_BIGENDIAN) && !defined(__BIG_ENDIAN__) /* uthash.h depends on __BIG_ENDIAN__ on BE platforms */ #define __BIG_ENDIAN__ 1 #endif #include #include #include "logging.h" #include "util.h" #ifdef HAVE_OPENCL #include "CL/cl.h" #endif /* HAVE_OPENCL */ #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_ALLOCA_H # include #elif defined __GNUC__ # ifndef WIN32 # define alloca __builtin_alloca # else # include # endif #elif defined _AIX # define alloca __alloca #elif defined _MSC_VER # include # define alloca _alloca #else # ifndef HAVE_ALLOCA # ifdef __cplusplus extern "C" # endif void *alloca (size_t); # endif #endif #ifdef __MINGW32__ #include #include static inline int fsync (int fd) { return (FlushFileBuffers ((HANDLE) _get_osfhandle (fd))) ? 0 : -1; } #ifndef EWOULDBLOCK # define EWOULDBLOCK EAGAIN #endif #ifndef MSG_DONTWAIT # define MSG_DONTWAIT 0x1000000 #endif #endif /* __MINGW32__ */ #if defined (__linux) #ifndef LINUX #define LINUX #endif #endif #ifdef HAVE_ADL #include "ADL/adl_sdk.h" #endif #ifdef HAVE_LIBUSB #include #endif #ifdef USE_ZTEX #include "libztex.h" #endif #ifdef USE_BITFURY #include "libbitfury.h" #endif #ifdef HAVE_BYTESWAP_H #include #endif #ifdef HAVE_ENDIAN_H #include #endif #ifdef HAVE_SYS_ENDIAN_H #include #endif #ifdef HAVE_LIBKERN_OSBYTEORDER_H #include #endif #ifndef bswap_16 #define bswap_16(value) \ ((((value) & 0xff) << 8) | ((value) >> 8)) #define bswap_32(value) \ (((uint32_t)bswap_16((uint16_t)((value) & 0xffff)) << 16) | \ (uint32_t)bswap_16((uint16_t)((value) >> 16))) #define bswap_64(value) \ (((uint64_t)bswap_32((uint32_t)((value) & 0xffffffff)) \ << 32) | \ (uint64_t)bswap_32((uint32_t)((value) >> 32))) #endif /* This assumes htobe32 is a macro and that if it doesn't exist, then the * also won't exist */ #ifndef htobe32 # ifndef WORDS_BIGENDIAN # define htole16(x) (x) # define htole32(x) (x) # define htole64(x) (x) # define htobe16(x) bswap_16(x) # define htobe32(x) bswap_32(x) # define htobe64(x) bswap_64(x) # else # define htole16(x) bswap_16(x) # define htole32(x) bswap_32(x) # define htole64(x) bswap_64(x) # define htobe16(x) (x) # define htobe32(x) (x) # define htobe64(x) (x) # endif #endif #ifndef be32toh # define le16toh(x) htole16(x) # define le32toh(x) htole32(x) # define le64toh(x) htole64(x) # define be16toh(x) htobe16(x) # define be32toh(x) htobe32(x) # define be64toh(x) htobe64(x) #endif #ifndef max # define max(a, b) ((a) > (b) ? 
(a) : (b)) #endif #undef unlikely #undef likely #if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__) #define unlikely(expr) (__builtin_expect(!!(expr), 0)) #define likely(expr) (__builtin_expect(!!(expr), 1)) #else #define unlikely(expr) (expr) #define likely(expr) (expr) #endif #ifndef __maybe_unused #define __maybe_unused __attribute__((unused)) #endif #define uninitialised_var(x) x = x #if defined(__i386__) #define WANT_CRYPTOPP_ASM32 #endif #ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif #ifdef HAVE_CURSES extern int my_cancellable_getch(void); # ifdef getch // getch() is a macro static int __maybe_unused __real_getch(void) { return getch(); } # undef getch # define getch() my_cancellable_getch() # else // getch() is a real function # define __real_getch getch # define getch() my_cancellable_getch() # endif #endif enum alive { LIFE_WELL, LIFE_SICK, LIFE_DEAD, LIFE_NOSTART, LIFE_INIT, LIFE_WAIT, LIFE_INIT2, // Still initializing, but safe to call functions LIFE_DEAD2, // Totally dead, NOT safe to call functions LIFE_MIXED, // Only valid in display variables, NOT on devices }; enum pool_strategy { POOL_FAILOVER, POOL_ROUNDROBIN, POOL_ROTATE, POOL_LOADBALANCE, POOL_BALANCE, }; #define TOP_STRATEGY (POOL_BALANCE) struct strategies { const char *s; }; struct cgpu_info; #ifdef HAVE_ADL struct gpu_adl { ADLTemperature lpTemperature; int iAdapterIndex; int lpAdapterID; int iBusNumber; char strAdapterName[256]; ADLPMActivity lpActivity; ADLODParameters lpOdParameters; ADLODPerformanceLevels *DefPerfLev; ADLFanSpeedInfo lpFanSpeedInfo; ADLFanSpeedValue lpFanSpeedValue; ADLFanSpeedValue DefFanSpeedValue; int iEngineClock; int iMemoryClock; int iVddc; int iPercentage; bool autofan; bool autoengine; bool managed; /* Were the values ever changed on this card */ int lastengine; int lasttemp; int targetfan; int overtemp; int minspeed; int maxspeed; int gpu; bool has_fanspeed; struct gpu_adl *twin; }; #endif enum pow_algorithm { POW_SHA256D = 1, POW_SCRYPT = 2, }; typedef uint8_t supported_algos_t; struct api_data; struct thr_info; struct work; struct lowlevel_device_info; struct device_drv { const char *dname; const char *name; int8_t probe_priority; bool lowl_probe_by_name_only; supported_algos_t supported_algos; // DRV-global functions void (*drv_init)(); void (*drv_detect)(); bool (*lowl_match)(const struct lowlevel_device_info *); bool (*lowl_probe)(const struct lowlevel_device_info *); // Processor-specific functions void (*reinit_device)(struct cgpu_info *); bool (*override_statline_temp2)(char *buf, size_t bufsz, struct cgpu_info *, bool per_processor); struct api_data* (*get_api_extra_device_detail)(struct cgpu_info *); struct api_data* (*get_api_extra_device_status)(struct cgpu_info *); struct api_data *(*get_api_stats)(struct cgpu_info *); bool (*get_stats)(struct cgpu_info *); bool (*identify_device)(struct cgpu_info *); // e.g. 
to flash a led char *(*set_device)(struct cgpu_info *, char *option, char *setting, char *replybuf); void (*proc_wlogprint_status)(struct cgpu_info *); void (*proc_tui_wlogprint_choices)(struct cgpu_info *); const char *(*proc_tui_handle_choice)(struct cgpu_info *, int input); // Thread-specific functions bool (*thread_prepare)(struct thr_info *); void (*minerloop)(struct thr_info *); uint64_t (*can_limit_work)(struct thr_info *); bool (*thread_init)(struct thr_info *); bool (*prepare_work)(struct thr_info *, struct work *); int64_t (*scanhash)(struct thr_info *, struct work *, int64_t); int64_t (*scanwork)(struct thr_info *); /* Used to extract work from the hash table of queued work and tell * the main loop that it should not add any further work to the table. */ bool (*queue_full)(struct cgpu_info *); void (*flush_work)(struct cgpu_info *); void (*hw_error)(struct thr_info *); void (*thread_shutdown)(struct thr_info *); void (*thread_disable)(struct thr_info *); void (*thread_enable)(struct thr_info *); // Can be used per-thread or per-processor (only with minerloop async or queue!) void (*poll)(struct thr_info *); // === Implemented by minerloop_async === bool (*job_prepare)(struct thr_info*, struct work*, uint64_t); void (*job_start)(struct thr_info*); void (*job_get_results)(struct thr_info*, struct work*); int64_t (*job_process_results)(struct thr_info*, struct work*, bool stopping); // === Implemented by minerloop_queue === bool (*queue_append)(struct thr_info *, struct work *); void (*queue_flush)(struct thr_info *); }; enum dev_enable { DEV_ENABLED, DEV_DISABLED, // Disabled by user DEV_RECOVER, // Disabled by temperature cutoff in watchdog DEV_RECOVER_ERR, // Disabled by communications error DEV_RECOVER_DRV, // Disabled by driver }; enum cl_kernels { KL_NONE, KL_POCLBM, KL_PHATK, KL_DIAKGCN, KL_DIABLO, KL_SCRYPT, }; enum dev_reason { REASON_THREAD_FAIL_INIT, REASON_THREAD_ZERO_HASH, REASON_THREAD_FAIL_QUEUE, REASON_DEV_SICK_IDLE_60, REASON_DEV_DEAD_IDLE_600, REASON_DEV_NOSTART, REASON_DEV_OVER_HEAT, REASON_DEV_THERMAL_CUTOFF, REASON_DEV_COMMS_ERROR, REASON_DEV_THROTTLE, }; #define REASON_NONE "None" #define REASON_THREAD_FAIL_INIT_STR "Thread failed to init" #define REASON_THREAD_ZERO_HASH_STR "Thread got zero hashes" #define REASON_THREAD_FAIL_QUEUE_STR "Thread failed to queue work" #define REASON_DEV_SICK_IDLE_60_STR "Device idle for 60s" #define REASON_DEV_DEAD_IDLE_600_STR "Device dead - idle for 600s" #define REASON_DEV_NOSTART_STR "Device failed to start" #define REASON_DEV_OVER_HEAT_STR "Device over heated" #define REASON_DEV_THERMAL_CUTOFF_STR "Device reached thermal cutoff" #define REASON_DEV_COMMS_ERROR_STR "Device comms error" #define REASON_DEV_THROTTLE_STR "Device throttle" #define REASON_UNKNOWN_STR "Unknown reason - code bug" #define MIN_SEC_UNSET 99999999 enum { MSG_NOPOOL = 8, MSG_MISPID = 25, MSG_INVPID = 26, MSG_DUPPID = 74, MSG_POOLPRIO = 73, }; struct cgminer_stats { struct timeval start_tv; uint32_t getwork_calls; struct timeval getwork_wait; struct timeval getwork_wait_max; struct timeval getwork_wait_min; struct timeval _get_start; }; // Just the actual network getworks to the pool struct cgminer_pool_stats { uint32_t getwork_calls; uint32_t getwork_attempts; struct timeval getwork_wait; struct timeval getwork_wait_max; struct timeval getwork_wait_min; double getwork_wait_rolling; bool hadrolltime; bool canroll; bool hadexpire; uint32_t rolltime; double min_diff; double max_diff; double last_diff; uint32_t min_diff_count; uint32_t max_diff_count; uint64_t 
times_sent; uint64_t bytes_sent; uint64_t net_bytes_sent; uint64_t times_received; uint64_t bytes_received; uint64_t net_bytes_received; }; #define PRIprepr "-6s" #define PRIpreprv "s" struct cgpu_info { int cgminer_id; int device_line_id; struct device_drv *drv; const char *devtype; int device_id; char *dev_repr; char *dev_repr_ns; const char *name; int procs; int proc_id; char proc_repr[8]; char proc_repr_ns[8]; struct cgpu_info *device; struct cgpu_info *next_proc; const char *device_path; void *device_data; const char *dev_manufacturer; const char *dev_product; const char *dev_serial; union { #ifdef USE_ZTEX struct libztex_device *device_ztex; #endif int device_fd; #ifdef USE_X6500 struct ft232r_device_handle *device_ft232r; #endif }; #ifdef USE_AVALON struct work **works; int work_array; int queued; int results; #endif #ifdef USE_BITFORCE struct timeval work_start_tv; unsigned int wait_ms; unsigned int sleep_ms; double avg_wait_f; unsigned int avg_wait_d; uint32_t nonces; bool polling; #endif #if defined(USE_BITFORCE) || defined(USE_ICARUS) || defined(USE_TWINFURY) bool flash_led; #endif pthread_mutex_t device_mutex; pthread_cond_t device_cond; enum dev_enable deven; bool already_set_defaults; int accepted; int rejected; int stale; int bad_nonces; int hw_errors; double rolling; double total_mhashes; double utility; double utility_diff1; enum alive status; char init[40]; struct timeval last_message_tv; int threads; struct thr_info **thr; int64_t max_hashes; const char *kname; #ifdef HAVE_OPENCL bool mapped; int virtual_gpu; int virtual_adl; int intensity; bool dynamic; cl_uint vwidth; size_t work_size; enum cl_kernels kernel; cl_ulong max_alloc; #ifdef USE_SCRYPT int opt_lg, lookup_gap; size_t opt_tc, thread_concurrency; size_t shaders; #endif struct timeval tv_gpustart; int intervals; #endif float temp; int cutofftemp; uint8_t cutofftemp_default; int targettemp; uint8_t targettemp_default; #ifdef HAVE_ADL bool has_adl; struct gpu_adl adl; int gpu_engine; int min_engine; int gpu_fan; int min_fan; int gpu_memclock; int gpu_memdiff; int gpu_powertune; float gpu_vddc; #endif int diff1; double diff_accepted; double diff_rejected; double diff_stale; int last_share_pool; time_t last_share_pool_time; double last_share_diff; time_t last_device_valid_work; time_t device_last_well; time_t device_last_not_well; struct timeval tv_device_last_not_well; enum dev_reason device_not_well_reason; float reinit_backoff; int thread_fail_init_count; int thread_zero_hash_count; int thread_fail_queue_count; int dev_sick_idle_60_count; int dev_dead_idle_600_count; int dev_nostart_count; int dev_over_heat_count; // It's a warning but worth knowing int dev_thermal_cutoff_count; int dev_comms_error_count; int dev_throttle_count; struct cgminer_stats cgminer_stats; pthread_rwlock_t qlock; struct work *queued_work; struct work *unqueued_work; unsigned int queued_count; bool disable_watchdog; bool shutdown; }; extern void renumber_cgpu(struct cgpu_info *); extern bool add_cgpu(struct cgpu_info*); struct tq_ent; struct thread_q { struct tq_ent *q; bool frozen; pthread_mutex_t mutex; pthread_cond_t cond; }; enum thr_busy_state { TBS_IDLE, TBS_GETTING_RESULTS, TBS_STARTING_JOB, }; struct thr_info { int id; int device_thread; bool primary_thread; bool has_pth; pthread_t pth; struct thread_q *q; struct cgpu_info *cgpu; void *cgpu_data; struct timeval last; struct timeval sick; bool scanhash_working; uint64_t hashes_done; struct timeval tv_hashes_done; struct timeval tv_lastupdate; struct timeval 
_tv_last_hashes_done_call; bool pause; time_t getwork; double rolling; // Used by minerloop_async struct work *prev_work; struct work *work; struct work *next_work; enum thr_busy_state busy_state; bool _mt_disable_called; struct timeval tv_morework; struct work *results_work; bool _job_transition_in_progress; bool _proceed_with_new_job; struct timeval tv_results_jobstart; struct timeval tv_jobstart; struct timeval tv_poll; struct timeval tv_watchdog; notifier_t notifier; bool starting_next_work; uint32_t _max_nonce; notifier_t mutex_request; // Used by minerloop_queue struct work *work_list; bool queue_full; bool work_restart; notifier_t work_restart_notifier; }; struct string_elist { char *string; bool free_me; struct string_elist *prev; struct string_elist *next; }; static inline void string_elist_add(const char *s, struct string_elist **head) { struct string_elist *n; n = calloc(1, sizeof(*n)); n->string = strdup(s); n->free_me = true; DL_APPEND(*head, n); } static inline void string_elist_del(struct string_elist **head, struct string_elist *item) { if (item->free_me) free(item->string); DL_DELETE(*head, item); free(item); } static inline uint32_t swab32(uint32_t v) { return bswap_32(v); } static inline void swap256(void *dest_p, const void *src_p) { uint32_t *dest = dest_p; const uint32_t *src = src_p; dest[0] = src[7]; dest[1] = src[6]; dest[2] = src[5]; dest[3] = src[4]; dest[4] = src[3]; dest[5] = src[2]; dest[6] = src[1]; dest[7] = src[0]; } static inline void swap32yes(void*out, const void*in, size_t sz) { size_t swapcounter = 0; for (swapcounter = 0; swapcounter < sz; ++swapcounter) (((uint32_t*)out)[swapcounter]) = swab32(((uint32_t*)in)[swapcounter]); } #define LOCAL_swap32(type, var, sz) \ type __swapped_ ## var[sz * 4 / sizeof(type)]; \ swap32yes(__swapped_ ## var, var, sz); \ var = __swapped_ ## var; \ // end #ifdef WORDS_BIGENDIAN # define swap32tobe(out, in, sz) ((out == in) ? (void)0 : memmove(out, in, sz)) # define LOCAL_swap32be(type, var, sz) ; # define swap32tole(out, in, sz) swap32yes(out, in, sz) # define LOCAL_swap32le(type, var, sz) LOCAL_swap32(type, var, sz) #else # define swap32tobe(out, in, sz) swap32yes(out, in, sz) # define LOCAL_swap32be(type, var, sz) LOCAL_swap32(type, var, sz) # define swap32tole(out, in, sz) ((out == in) ? 
(void)0 : memmove(out, in, sz)) # define LOCAL_swap32le(type, var, sz) ; #endif static inline void swab256(void *dest_p, const void *src_p) { uint32_t *dest = dest_p; const uint32_t *src = src_p; dest[0] = swab32(src[7]); dest[1] = swab32(src[6]); dest[2] = swab32(src[5]); dest[3] = swab32(src[4]); dest[4] = swab32(src[3]); dest[5] = swab32(src[2]); dest[6] = swab32(src[1]); dest[7] = swab32(src[0]); } #define flip32(dest_p, src_p) swap32yes(dest_p, src_p, 32 / 4) #define WATCHDOG_INTERVAL 2 extern void bfg_watchdog(struct cgpu_info *, struct timeval *tvp_now); extern void _quit(int status); static inline void mutex_lock(pthread_mutex_t *lock) { if (unlikely(pthread_mutex_lock(lock))) quit(1, "WTF MUTEX ERROR ON LOCK!"); } static inline void mutex_unlock_noyield(pthread_mutex_t *lock) { if (unlikely(pthread_mutex_unlock(lock))) quit(1, "WTF MUTEX ERROR ON UNLOCK!"); } static inline void mutex_unlock(pthread_mutex_t *lock) { mutex_unlock_noyield(lock); sched_yield(); } static inline int mutex_trylock(pthread_mutex_t *lock) { return pthread_mutex_trylock(lock); } static inline void wr_lock(pthread_rwlock_t *lock) { if (unlikely(pthread_rwlock_wrlock(lock))) quit(1, "WTF WRLOCK ERROR ON LOCK!"); } static inline void rd_lock(pthread_rwlock_t *lock) { if (unlikely(pthread_rwlock_rdlock(lock))) quit(1, "WTF RDLOCK ERROR ON LOCK!"); } static inline void rw_unlock(pthread_rwlock_t *lock) { if (unlikely(pthread_rwlock_unlock(lock))) quit(1, "WTF RWLOCK ERROR ON UNLOCK!"); } static inline void rd_unlock_noyield(pthread_rwlock_t *lock) { rw_unlock(lock); } static inline void wr_unlock_noyield(pthread_rwlock_t *lock) { rw_unlock(lock); } static inline void rd_unlock(pthread_rwlock_t *lock) { rw_unlock(lock); sched_yield(); } static inline void wr_unlock(pthread_rwlock_t *lock) { rw_unlock(lock); sched_yield(); } static inline void mutex_init(pthread_mutex_t *lock) { if (unlikely(pthread_mutex_init(lock, NULL))) quit(1, "Failed to pthread_mutex_init"); } static inline void mutex_destroy(pthread_mutex_t *lock) { /* Ignore return code. This only invalidates the mutex on linux but * releases resources on windows. */ pthread_mutex_destroy(lock); } static inline void rwlock_init(pthread_rwlock_t *lock) { if (unlikely(pthread_rwlock_init(lock, NULL))) quit(1, "Failed to pthread_rwlock_init"); } /* cgminer locks, a write biased variant of rwlocks */ struct cglock { pthread_mutex_t mutex; pthread_rwlock_t rwlock; }; typedef struct cglock cglock_t; static inline void rwlock_destroy(pthread_rwlock_t *lock) { pthread_rwlock_destroy(lock); } static inline void cglock_init(cglock_t *lock) { mutex_init(&lock->mutex); rwlock_init(&lock->rwlock); } static inline void cglock_destroy(cglock_t *lock) { rwlock_destroy(&lock->rwlock); mutex_destroy(&lock->mutex); } /* Read lock variant of cglock. Cannot be promoted. */ static inline void cg_rlock(cglock_t *lock) { mutex_lock(&lock->mutex); rd_lock(&lock->rwlock); mutex_unlock_noyield(&lock->mutex); } /* Intermediate variant of cglock - behaves as a read lock but can be promoted * to a write lock or demoted to read lock. 
*/ static inline void cg_ilock(cglock_t *lock) { mutex_lock(&lock->mutex); } /* Upgrade intermediate variant to a write lock */ static inline void cg_ulock(cglock_t *lock) { wr_lock(&lock->rwlock); } /* Write lock variant of cglock */ static inline void cg_wlock(cglock_t *lock) { mutex_lock(&lock->mutex); wr_lock(&lock->rwlock); } /* Downgrade write variant to a read lock */ static inline void cg_dwlock(cglock_t *lock) { wr_unlock_noyield(&lock->rwlock); rd_lock(&lock->rwlock); mutex_unlock_noyield(&lock->mutex); } /* Demote a write variant to an intermediate variant */ static inline void cg_dwilock(cglock_t *lock) { wr_unlock(&lock->rwlock); } /* Downgrade intermediate variant to a read lock */ static inline void cg_dlock(cglock_t *lock) { rd_lock(&lock->rwlock); mutex_unlock(&lock->mutex); } static inline void cg_runlock(cglock_t *lock) { rd_unlock(&lock->rwlock); } static inline void cg_wunlock(cglock_t *lock) { wr_unlock_noyield(&lock->rwlock); mutex_unlock(&lock->mutex); } struct pool; #define API_MCAST_CODE "FTW" #define API_MCAST_ADDR "224.0.0.75" extern bool opt_protocol; extern bool opt_dev_protocol; extern char *opt_coinbase_sig; extern char *request_target_str; extern bool have_longpoll; extern int opt_skip_checks; extern char *opt_kernel_path; extern char *opt_socks_proxy; extern char *cmd_idle, *cmd_sick, *cmd_dead; extern char *cgminer_path; extern bool opt_fail_only; extern bool opt_autofan; extern bool opt_autoengine; extern bool use_curses; #ifdef HAVE_LIBUSB extern bool have_libusb; #endif extern int httpsrv_port; extern int stratumsrv_port; extern char *opt_api_allow; extern bool opt_api_mcast; extern char *opt_api_mcast_addr; extern char *opt_api_mcast_code; extern char *opt_api_mcast_des; extern int opt_api_mcast_port; extern char *opt_api_groups; extern char *opt_api_description; extern int opt_api_port; extern bool opt_api_listen; extern bool opt_api_network; extern bool opt_delaynet; extern bool opt_restart; extern char *opt_icarus_options; extern char *opt_icarus_timing; extern bool opt_worktime; #ifdef USE_AVALON extern char *opt_avalon_options; #endif #ifdef USE_KLONDIKE extern char *opt_klondike_options; #endif #ifdef USE_BITFORCE extern bool opt_bfl_noncerange; #endif extern int swork_id; extern pthread_rwlock_t netacc_lock; extern const uint32_t sha256_init_state[]; extern json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool, bool, int *, struct pool *pool, bool); extern bool our_curl_supports_proxy_uris(); extern void bin2hex(char *out, const void *in, size_t len); extern bool hex2bin(unsigned char *p, const char *hexstr, size_t len); typedef bool (*sha256_func)(struct thr_info*, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce); extern bool fulltest(const unsigned char *hash, const unsigned char *target); extern int opt_queue; extern int opt_scantime; extern int opt_expiry; extern cglock_t control_lock; extern pthread_mutex_t stats_lock; extern pthread_mutex_t hash_lock; extern pthread_mutex_t console_lock; extern cglock_t ch_lock; extern pthread_rwlock_t mining_thr_lock; extern pthread_rwlock_t devices_lock; extern bool _bfg_console_cancel_disabled; extern int _bfg_console_prev_cancelstate; static inline void bfg_console_lock(void) { _bfg_console_cancel_disabled = !pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &_bfg_console_prev_cancelstate); mutex_lock(&console_lock); } 
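/* Editor's illustrative sketch -- not part of the original miner.h. It shows
 * how the cglock promote/demote helpers defined above are intended to be
 * combined: take the intermediate lock while inspecting shared state, upgrade
 * to a write lock only if a change is actually needed, then demote back to a
 * read lock for the rest of the critical section. The struct and function
 * below are hypothetical examples, not BFGMiner code. */
struct example_shared {
	cglock_t lock;
	int value;
};

static inline int example_clamp_and_read(struct example_shared *s)
{
	int v;

	cg_ilock(&s->lock);           /* intermediate lock: safe to inspect, still promotable */
	if (s->value < 0) {
		cg_ulock(&s->lock);   /* promote to a full write lock */
		s->value = 0;         /* mutate only while holding the write lock */
		cg_dwlock(&s->lock);  /* demote the write lock to a read lock */
	} else
		cg_dlock(&s->lock);   /* no change needed: drop straight to a read lock */
	v = s->value;
	cg_runlock(&s->lock);         /* release the read side */
	return v;
}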
static inline void bfg_console_unlock(void) { mutex_unlock(&console_lock); if (_bfg_console_cancel_disabled) pthread_setcancelstate(_bfg_console_prev_cancelstate, &_bfg_console_prev_cancelstate); } extern void thread_reportin(struct thr_info *thr); extern void thread_reportout(struct thr_info *); extern void clear_stratum_shares(struct pool *pool); extern void hashmeter2(struct thr_info *); extern bool stale_work(struct work *, bool share); extern bool stale_work_future(struct work *, bool share, unsigned long ustime); extern void set_target(unsigned char *dest_target, double diff); extern void kill_work(void); extern void app_restart(void); extern void __thr_being_msg(int prio, struct thr_info *, const char *); extern void mt_enable(struct thr_info *thr); extern void proc_enable(struct cgpu_info *); extern void reinit_device(struct cgpu_info *cgpu); extern void cgpu_set_defaults(struct cgpu_info *); extern void drv_set_defaults(const struct device_drv *, char *(*set_func)(struct cgpu_info *, char *, char *, char *), void *userp); #ifdef HAVE_ADL extern bool gpu_stats(int gpu, float *temp, int *engineclock, int *memclock, float *vddc, int *activity, int *fanspeed, int *fanpercent, int *powertune); extern int set_fanspeed(int gpu, int iFanSpeed); extern int set_vddc(int gpu, float fVddc); extern int set_engineclock(int gpu, int iEngineClock); extern int set_memoryclock(int gpu, int iMemoryClock); #endif extern void api(int thr_id); extern struct pool *current_pool(void); extern int enabled_pools; extern bool get_intrange(const char *arg, int *val1, int *val2); extern bool detect_stratum(struct pool *pool, char *url); extern void print_summary(void); extern void adjust_quota_gcd(void); extern struct pool *add_pool(void); extern bool add_pool_details(struct pool *pool, bool live, char *url, char *user, char *pass); #define MAX_GPUDEVICES 16 #define MAX_DEVICES 4096 #define MIN_SHA_INTENSITY -10 #define MIN_SHA_INTENSITY_STR "-10" #define MAX_SHA_INTENSITY 14 #define MAX_SHA_INTENSITY_STR "14" #define MIN_SCRYPT_INTENSITY 8 #define MIN_SCRYPT_INTENSITY_STR "8" #define MAX_SCRYPT_INTENSITY 20 #define MAX_SCRYPT_INTENSITY_STR "20" #ifdef USE_SCRYPT #define MIN_INTENSITY (opt_scrypt ? MIN_SCRYPT_INTENSITY : MIN_SHA_INTENSITY) #define MIN_INTENSITY_STR (opt_scrypt ? MIN_SCRYPT_INTENSITY_STR : MIN_SHA_INTENSITY_STR) #define MAX_INTENSITY (opt_scrypt ? MAX_SCRYPT_INTENSITY : MAX_SHA_INTENSITY) #define MAX_INTENSITY_STR (opt_scrypt ? 
MAX_SCRYPT_INTENSITY_STR : MAX_SHA_INTENSITY_STR) #define MAX_GPU_INTENSITY MAX_SCRYPT_INTENSITY #else #define MIN_INTENSITY MIN_SHA_INTENSITY #define MIN_INTENSITY_STR MIN_SHA_INTENSITY_STR #define MAX_INTENSITY MAX_SHA_INTENSITY #define MAX_INTENSITY_STR MAX_SHA_INTENSITY_STR #define MAX_GPU_INTENSITY MAX_SHA_INTENSITY #endif extern struct string_elist *scan_devices; extern bool opt_force_dev_init; extern int nDevs; extern int opt_n_threads; extern int num_processors; extern int hw_errors; extern bool use_syslog; extern bool opt_quiet; extern struct thr_info *control_thr; extern struct thr_info **mining_thr; extern struct cgpu_info gpus[MAX_GPUDEVICES]; #ifdef USE_SCRYPT extern bool opt_scrypt; #else #define opt_scrypt (0) #endif extern double total_secs; extern int mining_threads; extern struct cgpu_info *cpus; extern int total_devices; extern struct cgpu_info **devices; extern int total_devices_new; extern struct cgpu_info **devices_new; extern int total_pools; extern struct pool **pools; extern const char *algo_names[]; extern enum sha256_algos opt_algo; extern struct strategies strategies[]; extern enum pool_strategy pool_strategy; extern int opt_rotate_period; extern double total_rolling; extern double total_mhashes_done; extern unsigned int new_blocks; extern unsigned int found_blocks; extern int total_accepted, total_rejected, total_diff1;; extern int total_bad_nonces; extern int total_getworks, total_stale, total_discarded; extern uint64_t total_bytes_rcvd, total_bytes_sent; #define total_bytes_xfer (total_bytes_rcvd + total_bytes_sent) extern double total_diff_accepted, total_diff_rejected, total_diff_stale; extern unsigned int local_work; extern unsigned int total_go, total_ro; extern const int opt_cutofftemp; extern int opt_hysteresis; extern int opt_fail_pause; extern int opt_log_interval; extern unsigned long long global_hashrate; extern char *current_fullhash; extern double current_diff; extern uint64_t best_diff; extern time_t block_time; #ifdef HAVE_OPENCL typedef struct { cl_uint ctx_a; cl_uint ctx_b; cl_uint ctx_c; cl_uint ctx_d; cl_uint ctx_e; cl_uint ctx_f; cl_uint ctx_g; cl_uint ctx_h; cl_uint cty_a; cl_uint cty_b; cl_uint cty_c; cl_uint cty_d; cl_uint cty_e; cl_uint cty_f; cl_uint cty_g; cl_uint cty_h; cl_uint merkle; cl_uint ntime; cl_uint nbits; cl_uint nonce; cl_uint fW0; cl_uint fW1; cl_uint fW2; cl_uint fW3; cl_uint fW15; cl_uint fW01r; cl_uint fcty_e; cl_uint fcty_e2; cl_uint W16; cl_uint W17; cl_uint W2; cl_uint PreVal4; cl_uint T1; cl_uint C1addK5; cl_uint D1A; cl_uint W2A; cl_uint W17_2; cl_uint PreVal4addT1; cl_uint T1substate0; cl_uint PreVal4_2; cl_uint PreVal0; cl_uint PreW18; cl_uint PreW19; cl_uint PreW31; cl_uint PreW32; /* For diakgcn */ cl_uint B1addK6, PreVal0addK7, W16addK16, W17addK17; cl_uint zeroA, zeroB; cl_uint oneA, twoA, threeA, fourA, fiveA, sixA, sevenA; #ifdef USE_SCRYPT struct work *work; #endif } dev_blk_ctx; #else typedef struct { uint32_t nonce; } dev_blk_ctx; #endif struct curl_ent { CURL *curl; struct curl_ent *next; struct timeval tv; }; /* Disabled needs to be the lowest enum as a freshly calloced value will then * equal disabled */ enum pool_enable { POOL_DISABLED, POOL_ENABLED, POOL_REJECTING, }; enum pool_protocol { PLP_NONE, PLP_GETWORK, PLP_GETBLOCKTEMPLATE, }; struct stratum_work { char *job_id; bool clean; bytes_t coinbase; size_t nonce2_offset; int merkles; bytes_t merkle_bin; uint8_t header1[36]; uint8_t diffbits[4]; uint32_t ntime; struct timeval tv_received; double diff; bool transparency_probed; struct timeval 
tv_transparency; bool opaque; cglock_t *data_lock_p; }; #define RBUFSIZE 8192 #define RECVSIZE (RBUFSIZE - 4) struct pool { int pool_no; int prio; int accepted, rejected; int seq_rejects; int seq_getfails; int solved; int diff1; char diff[8]; int quota; int quota_gcd; int quota_used; int works; double diff_accepted; double diff_rejected; double diff_stale; bool submit_fail; bool idle; bool lagging; bool probed; int force_rollntime; enum pool_enable enabled; bool submit_old; bool removed; bool lp_started; unsigned char work_restart_id; uint32_t block_id; enum pool_protocol proto; char *hdr_path; char *lp_url; char *lp_id; enum pool_protocol lp_proto; curl_socket_t lp_socket; unsigned int getwork_requested; unsigned int stale_shares; unsigned int discarded_work; unsigned int getfail_occasions; unsigned int remotefail_occasions; struct timeval tv_idle; double utility; int last_shares, shares; char *rpc_url; char *rpc_userpass; char *rpc_user, *rpc_pass; char *rpc_proxy; pthread_mutex_t pool_lock; cglock_t data_lock; struct thread_q *submit_q; struct thread_q *getwork_q; pthread_t longpoll_thread; pthread_t test_thread; bool testing; int curls; pthread_cond_t cr_cond; struct curl_ent *curllist; struct submit_work_state *sws_waiting_on_curl; time_t last_work_time; struct timeval tv_last_work_time; time_t last_share_time; double last_share_diff; uint64_t best_diff; struct cgminer_stats cgminer_stats; struct cgminer_pool_stats cgminer_pool_stats; /* Stratum variables */ char *stratum_url; char *stratum_port; CURL *stratum_curl; SOCKETTYPE sock; char *sockbuf; size_t sockbuf_size; char *sockaddr_url; /* stripped url used for sockaddr */ char *nonce1; size_t n1_len; uint32_t nonce2; int nonce2sz; #ifdef WORDS_BIGENDIAN int nonce2off; #endif int n2size; char *sessionid; bool has_stratum; bool stratum_active; bool stratum_init; bool stratum_notify; struct stratum_work swork; pthread_t stratum_thread; pthread_mutex_t stratum_lock; char *admin_msg; pthread_mutex_t last_work_lock; struct work *last_work_copy; }; #define GETWORK_MODE_TESTPOOL 'T' #define GETWORK_MODE_POOL 'P' #define GETWORK_MODE_LP 'L' #define GETWORK_MODE_BENCHMARK 'B' #define GETWORK_MODE_STRATUM 'S' #define GETWORK_MODE_GBT 'G' struct work { unsigned char data[128]; unsigned char midstate[32]; unsigned char target[32]; unsigned char hash[32]; uint64_t share_diff; int rolls; int drv_rolllimit; /* How much the driver can roll ntime */ dev_blk_ctx blk; struct thr_info *thr; int thr_id; struct pool *pool; struct timeval tv_staged; bool mined; bool clone; bool cloned; int rolltime; bool longpoll; bool stale; bool mandatory; bool block; bool stratum; char *job_id; bytes_t nonce2; double sdiff; char *nonce1; unsigned char work_restart_id; int id; int device_id; UT_hash_handle hh; double work_difficulty; // Allow devices to identify work if multiple sub-devices // DEPRECATED: New code should be using multiple processors instead unsigned char subid; // Allow devices to timestamp work for their own purposes struct timeval tv_stamp; blktemplate_t *tmpl; int *tmpl_refcount; unsigned int dataid; bool do_foreign_submit; struct timeval tv_getwork; time_t ts_getwork; struct timeval tv_getwork_reply; struct timeval tv_cloned; struct timeval tv_work_start; struct timeval tv_work_found; char getwork_mode; /* Used to queue shares in submit_waiting */ struct work *prev; struct work *next; }; extern void get_datestamp(char *, size_t, time_t); #define get_now_datestamp(buf, bufsz) get_datestamp(buf, bufsz, INVALID_TIMESTAMP) extern void 
stratum_work_cpy(struct stratum_work *dst, const struct stratum_work *src); extern void stratum_work_clean(struct stratum_work *); extern void gen_stratum_work2(struct work *, struct stratum_work *, const char *nonce1); extern void inc_hw_errors2(struct thr_info *thr, const struct work *work, const uint32_t *bad_nonce_p); #define UNKNOWN_NONCE ((uint32_t*)inc_hw_errors2) extern void inc_hw_errors(struct thr_info *, const struct work *, const uint32_t bad_nonce); #define inc_hw_errors_only(thr) inc_hw_errors(thr, NULL, 0) enum test_nonce2_result { TNR_GOOD = 1, TNR_HIGH = 0, TNR_BAD = -1, }; extern enum test_nonce2_result _test_nonce2(struct work *, uint32_t nonce, bool checktarget); #define test_nonce(work, nonce, checktarget) (_test_nonce2(work, nonce, checktarget) == TNR_GOOD) #define test_nonce2(work, nonce) (_test_nonce2(work, nonce, true)) extern bool submit_nonce(struct thr_info *thr, struct work *work, uint32_t nonce); extern bool submit_noffset_nonce(struct thr_info *thr, struct work *work, uint32_t nonce, int noffset); extern void __add_queued(struct cgpu_info *cgpu, struct work *work); extern struct work *get_queued(struct cgpu_info *cgpu); extern void add_queued(struct cgpu_info *cgpu, struct work *work); extern struct work *get_queue_work(struct thr_info *thr, struct cgpu_info *cgpu, int thr_id); extern struct work *__find_work_bymidstate(struct work *que, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); extern struct work *find_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); extern struct work *clone_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); extern void __work_completed(struct cgpu_info *cgpu, struct work *work); extern void work_completed(struct cgpu_info *cgpu, struct work *work); extern struct work *take_queued_work_bymidstate(struct cgpu_info *cgpu, char *midstate, size_t midstatelen, char *data, int offset, size_t datalen); extern bool abandon_work(struct work *, struct timeval *work_runtime, uint64_t hashes); extern void hash_queued_work(struct thr_info *mythr); extern void get_statline3(char *buf, size_t bufsz, struct cgpu_info *, bool for_curses, bool opt_show_procs); extern void tailsprintf(char *buf, size_t bufsz, const char *fmt, ...) 
FORMAT_SYNTAX_CHECK(printf, 3, 4); extern void _wlog(const char *str); extern void _wlogprint(const char *str); extern int curses_int(const char *query); extern char *curses_input(const char *query); extern bool drv_ready(struct cgpu_info *); extern double stats_elapsed(struct cgminer_stats *); #define cgpu_runtime(cgpu) stats_elapsed(&((cgpu)->cgminer_stats)) extern double cgpu_utility(struct cgpu_info *); extern void kill_work(void); extern int prioritize_pools(char *param, int *pid); extern void validate_pool_priorities(void); extern void switch_pools(struct pool *selected); extern void remove_pool(struct pool *pool); extern void write_config(FILE *fcfg); extern void zero_bestshare(void); extern void zero_stats(void); extern void default_save_file(char *filename); extern bool _log_curses_only(int prio, const char *datetime, const char *str); extern void clear_logwin(void); extern void logwin_update(void); extern bool pool_tclear(struct pool *pool, bool *var); extern struct thread_q *tq_new(void); extern void tq_free(struct thread_q *tq); extern bool tq_push(struct thread_q *tq, void *data); extern void *tq_pop(struct thread_q *tq, const struct timespec *abstime); extern void tq_freeze(struct thread_q *tq); extern void tq_thaw(struct thread_q *tq); extern bool successful_connect; extern void adl(void); extern void clean_work(struct work *work); extern void free_work(struct work *work); extern void __copy_work(struct work *work, const struct work *base_work); extern struct work *copy_work(const struct work *base_work); extern char *devpath_to_devid(const char *); extern struct thr_info *get_thread(int thr_id); extern struct cgpu_info *get_devices(int id); extern int create_new_cgpus(void (*addfunc)(void*), void *arg); extern int scan_serial(const char *); enum api_data_type { API_ESCAPE, API_STRING, API_CONST, API_UINT8, API_UINT16, API_INT, API_UINT, API_UINT32, API_UINT64, API_DOUBLE, API_ELAPSED, API_BOOL, API_TIMEVAL, API_TIME, API_MHS, API_MHTOTAL, API_TEMP, API_UTILITY, API_FREQ, API_VOLTS, API_HS, API_DIFF, API_JSON, API_PERCENT }; struct api_data { enum api_data_type type; char *name; void *data; bool data_was_malloc; struct api_data *prev; struct api_data *next; }; extern struct api_data *api_add_escape(struct api_data *root, char *name, char *data, bool copy_data); extern struct api_data *api_add_string(struct api_data *root, char *name, const char *data, bool copy_data); extern struct api_data *api_add_const(struct api_data *root, char *name, const char *data, bool copy_data); extern struct api_data *api_add_uint8(struct api_data *root, char *name, uint8_t *data, bool copy_data); extern struct api_data *api_add_uint16(struct api_data *root, char *name, uint16_t *data, bool copy_data); extern struct api_data *api_add_int(struct api_data *root, char *name, int *data, bool copy_data); extern struct api_data *api_add_uint(struct api_data *root, char *name, unsigned int *data, bool copy_data); extern struct api_data *api_add_uint32(struct api_data *root, char *name, uint32_t *data, bool copy_data); extern struct api_data *api_add_uint64(struct api_data *root, char *name, uint64_t *data, bool copy_data); extern struct api_data *api_add_double(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_elapsed(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_bool(struct api_data *root, char *name, bool *data, bool copy_data); extern struct api_data *api_add_timeval(struct api_data *root, char *name, 
struct timeval *data, bool copy_data); extern struct api_data *api_add_time(struct api_data *root, char *name, time_t *data, bool copy_data); extern struct api_data *api_add_mhs(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_mhstotal(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_temp(struct api_data *root, char *name, float *data, bool copy_data); extern struct api_data *api_add_utility(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_freq(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_volts(struct api_data *root, char *name, float *data, bool copy_data); extern struct api_data *api_add_hs(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_diff(struct api_data *root, char *name, double *data, bool copy_data); extern struct api_data *api_add_json(struct api_data *root, char *name, json_t *data, bool copy_data); #endif /* __MINER_H__ */ bfgminer-bfgminer-3.10.0/miner.php000066400000000000000000002057171226556647300170320ustar00rootroot00000000000000\n"; # # See README.RPC for more details of these variables and how # to configure miner.php # # Web page title $title = 'Mine'; # # Set $readonly to true to force miner.php to be readonly # Set $readonly to false then it will check BFGMiner 'privileged' $readonly = false; # # Set $userlist to null to allow anyone access or read README.RPC $userlist = null; # # Set $per_proc to false to display only full device summaries $per_proc = true; # # Set $notify to false to NOT attempt to display the notify command # Set $notify to true to attempt to display the notify command $notify = true; # # Set $checklastshare to true to do the following checks: # If a device's last share is 12x expected ago then display as an error # If a device's last share is 8x expected ago then display as a warning # If either of the above is true, also display the whole line highlighted # This assumes shares are 1 difficulty shares $checklastshare = true; # # Set $poolinputs to true to show the input fields for adding a pool # and changing the pool priorities # N.B. 
also if $readonly is true, it will not display the fields $poolinputs = false; # # Set $rigs to an array of your BFGMiner rigs that are running # format: 'IP:Port' or 'Host:Port' or 'Host:Port:Name' $rigs = array('127.0.0.1:4028'); # # Set $mcast to true to look for your rigs and ignore $rigs $mcast = false; # # Set $mcastexpect to at least how many rigs you expect it to find $mcastexpect = 0; # # API Multicast address all cgminers are listening on $mcastaddr = '224.0.0.75'; # # API Multicast UDP port all cgminers are listening on $mcastport = 4028; # # The code all cgminers expect in the Multicast message sent $mcastcode = 'FTW'; # # UDP port cgminers are to reply on (by request) $mcastlistport = 4027; # # Set $mcasttimeout to the number of seconds (floating point) # to wait for replies to the Multicast message $mcasttimeout = 1.5; # # Set $mcastretries to the number of times to retry the multicast $mcastretries = 0; # # Set $allowgen to true to allow customsummarypages to use 'gen' # false means ignore any 'gen' options $allowgen = false; # # Set $rigipsecurity to false to show the IP/Port of the rig # in the socket error messages and also show the full socket message $rigipsecurity = true; # # Set $rigtotals to true to display totals on the single rig page # 'false' means no totals (and ignores $forcerigtotals) # You can force it to always show rig totals when there is only # one line by setting $forcerigtotals = true; $rigtotals = true; $forcerigtotals = false; # # These should be OK for most cases $socksndtimeoutsec = 10; $sockrcvtimeoutsec = 40; # # List of fields NOT to be displayed # This example would hide the slightly more sensitive pool information #$hidefields = array('POOL.URL' => 1, 'POOL.User' => 1); $hidefields = array(); # # Auto-refresh of the page (in seconds) - integers only # $ignorerefresh = true/false always ignore refresh parameters # $changerefresh = true/false show buttons to change the value # $autorefresh = default value, 0 means dont auto-refresh $ignorerefresh = false; $changerefresh = true; $autorefresh = 0; # # Should we allow custom pages? 
# (or just completely ignore them and don't display the buttons) $allowcustompages = true; # # OK this is a bit more complex item: Custom Summary Pages # As mentioned above, see README.RPC # see the example below (if there is no matching data, no total will show) $mobilepage = array( 'DATE' => null, 'RIGS' => null, 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks', 'Accepted', 'Rejected=Rej', 'Utility'), 'DEVS+NOTIFY' => array('DEVS.Name=Name', 'DEVS.ID=ID', 'DEVS.ProcID=Proc', 'DEVS.Status=Status', 'DEVS.Temperature=Temp', 'DEVS.MHS av=MHS av', 'DEVS.Accepted=Accept', 'DEVS.Rejected=Rej', 'DEVS.Utility=Utility', 'NOTIFY.Last Not Well=Not Well'), 'POOL' => array('POOL', 'Status', 'Accepted', 'Rejected=Rej', 'Last Share Time')); $mobilesum = array( 'SUMMARY' => array('MHS av', 'Found Blocks', 'Accepted', 'Rejected', 'Utility'), 'DEVS+NOTIFY' => array('DEVS.MHS av', 'DEVS.Accepted', 'DEVS.Rejected', 'DEVS.Utility'), 'POOL' => array('Accepted', 'Rejected')); # $statspage = array( 'DATE' => null, 'RIGS' => null, 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks', 'Accepted', 'Rejected=Rej', 'Utility', 'Hardware Errors=HW Errs', 'Network Blocks=Net Blks', 'Work Utility'), 'COIN' => array('*'), 'STATS' => array('*')); # $statssum = array( 'SUMMARY' => array('MHS av', 'Found Blocks', 'Accepted', 'Rejected', 'Utility', 'Hardware Errors', 'Work Utility')); # $poolspage = array( 'DATE' => null, 'RIGS' => null, 'SUMMARY' => array('Elapsed', 'MHS av', 'Found Blocks=Blks', 'Accepted', 'Rejected=Rej', 'Utility', 'Hardware Errors=HW Errs', 'Network Blocks=Net Blks'), 'POOL+STATS' => array('STATS.ID=ID', 'POOL.URL=URL', 'POOL.Has Stratum=Stratum', 'POOL.Stratum Active=StrAct', 'STATS.Net Bytes Sent=NSent', 'STATS.Net Bytes Recv=NRecv', 'GEN.AvShr=AvShr')); # $poolssum = array( 'SUMMARY' => array('MHS av', 'Found Blocks', 'Accepted', 'Rejected', 'Utility', 'Hardware Errors'), 'POOL+STATS' => array( 'STATS.Net Bytes Sent', 'STATS.Net Bytes Recv')); # $poolsext = array( 'POOL+STATS' => array( 'where' => null, 'group' => array('POOL.URL', 'POOL.Has Stratum', 'POOL.Stratum Active'), 'calc' => array( 'STATS.Net Bytes Sent' => 'sum', 'STATS.Net Bytes Recv' => 'sum', 'POOL.Accepted' => 'sum'), 'gen' => array('AvShr' => 'round(POOL.Difficulty Accepted/max(POOL.Accepted,1)*100)/100'), 'having' => array(array('STATS.Bytes Recv', '>', 0))) ); # # customsummarypages is an array of these Custom Summary Pages $customsummarypages = array('Mobile' => array($mobilepage, $mobilesum), 'Stats' => array($statspage, $statssum), 'Pools' => array($poolspage, $poolssum, $poolsext)); # $here = $_SERVER['PHP_SELF']; # global $tablebegin, $tableend, $warnfont, $warnoff, $dfmt; # $tablebegin = ''; $tableend = '
'; $warnfont = ''; $warnoff = ''; $dfmt = 'H:i:s j-M-Y \U\T\CP'; # $miner_font_family = 'Verdana, Arial, sans-serif, sans'; $miner_font_size = '13pt'; # $bad_font_family = '"Times New Roman", Times, serif'; $bad_font_size = '18pt'; # # Edit this or redefine it in myminer.php to change the colour scheme # See $colourtable below for the list of names $colouroverride = array(); # # Where to place the buttons: 'top' 'bot' 'both' # anything else means don't show them - case sensitive $placebuttons = 'top'; # # This below allows you to put your own settings into a seperate file # so you don't need to update miner.php with your preferred settings # every time a new version is released # Just create the file 'myminer.php' in the same directory as # 'miner.php' - and put your own settings in there if (file_exists('myminer.php')) include_once('myminer.php'); # # This is the system default that must always contain all necessary # colours so it must be a constant # You can override these values with $colouroverride # The only one missing is $warnfont # - which you can override directly anyway global $colourtable; $colourtable = array( 'body bgcolor' => '#ecffff', 'td color' => 'blue', 'td.two color' => 'blue', 'td.two background' => '#ecffff', 'td.h color' => 'blue', 'td.h background' => '#c4ffff', 'td.err color' => 'black', 'td.err background' => '#ff3050', 'td.bad color' => 'black', 'td.bad background' => '#ff3050', 'td.warn color' => 'black', 'td.warn background' => '#ffb050', 'td.sta color' => 'green', 'td.tot color' => 'blue', 'td.tot background' => '#fff8f2', 'td.lst color' => 'blue', 'td.lst background' => '#ffffdd', 'td.hi color' => 'blue', 'td.hi background' => '#f6ffff', 'td.lo color' => 'blue', 'td.lo background' => '#deffff' ); # # Don't touch these 2 $miner = null; $port = null; # # Ensure it is only ever shown once global $showndate; $showndate = false; # # For summary page to stop retrying failed rigs global $rigerror; $rigerror = array(); # global $rownum; $rownum = 0; # // Login global $ses; $ses = 'rutroh'; # function getcss($cssname, $dom = false) { global $colourtable, $colouroverride; $css = ''; foreach ($colourtable as $cssdata => $value) { $cssobj = explode(' ', $cssdata, 2); if ($cssobj[0] == $cssname) { if (isset($colouroverride[$cssdata])) $value = $colouroverride[$cssdata]; if ($dom == true) $css .= ' '.$cssobj[1].'='.$value; else $css .= $cssobj[1].':'.$value.'; '; } } return $css; } # function getdom($domname) { return getcss($domname, true); } # function htmlhead($mcerr, $checkapi, $rig, $pg = null, $noscript = false) { global $doctype, $title, $miner_font_family, $miner_font_size; global $bad_font_family, $bad_font_size; global $error, $readonly, $poolinputs, $here; global $ignorerefresh, $autorefresh; $extraparams = ''; if ($rig != null && $rig != '') $extraparams = "&rig=$rig"; else if ($pg != null && $pg != '') $extraparams = "&pg=$pg"; if ($ignorerefresh == true || $autorefresh == 0) $refreshmeta = ''; else { $url = "$here?ref=$autorefresh$extraparams"; $refreshmeta = "\n"; } if ($readonly === false && $checkapi === true) { $error = null; $access = api($rig, 'privileged'); if ($error != null || !isset($access['STATUS']['STATUS']) || $access['STATUS']['STATUS'] != 'S') $readonly = true; } $miner_font = "font-family:$miner_font_family; font-size:$miner_font_size;"; $bad_font = "font-family:$bad_font_family; font-size:$bad_font_size;"; echo "$doctype$refreshmeta $title \n"; if ($noscript === false) { echo "\n"; } ?>
0); do { $mcast_soc = socket_create(AF_INET, SOCK_DGRAM, SOL_UDP); if ($mcast_soc === false || $mcast_soc == null) { $msg = "ERR: mcast send socket create(UDP) failed"; if ($rigipsecurity === false) { $error = socket_strerror(socket_last_error()); $error = "$msg '$error'\n"; } else $error = "$msg\n"; socket_close($rep_soc); return; } $buf = "cgminer-$mcastcode-$mcastlistport"; socket_sendto($mcast_soc, $buf, strlen($buf), 0, $mcastaddr, $mcastport); socket_close($mcast_soc); $stt = microtime(true); while (true) { $got = @socket_recvfrom($rep_soc, $buf, 32, MSG_DONTWAIT, $ip, $p); if ($got !== false && $got > 0) { $ans = explode('-', $buf, 4); if (count($ans) >= 3 && $ans[0] == 'cgm' && $ans[1] == 'FTW') { $rp = intval($ans[2]); if (count($ans) > 3) $mdes = str_replace("\0", '', $ans[3]); else $mdes = ''; if (strlen($mdes) > 0) $rig = "$ip:$rp:$mdes"; else $rig = "$ip:$rp"; if (!in_array($rig, $rigs)) $rigs[] = $rig; } } if ((microtime(true) - $stt) >= $mcasttimeout) break; usleep(100000); } if ($mcastexpect > 0 && count($rigs) >= $mcastexpect) $doretry = false; } while ($doretry && --$retries > 0); socket_close($rep_soc); } # function getrigs() { global $rigs; mcastrigs(); sort($rigs); } # function getsock($rig, $addr, $port) { global $rigipsecurity; global $haderror, $error, $socksndtimeoutsec, $sockrcvtimeoutsec; $error = null; $socket = null; $socket = socket_create(AF_INET, SOCK_STREAM, SOL_TCP); if ($socket === false || $socket === null) { $haderror = true; if ($rigipsecurity === false) { $error = socket_strerror(socket_last_error()); $msg = "socket create(TCP) failed"; $error = "ERR: $msg '$error'\n"; } else $error = "ERR: socket create(TCP) failed\n"; return null; } // Ignore if this fails since the socket connect may work anyway // and nothing is gained by aborting if the option cannot be set // since we don't know in advance if it can connect socket_set_option($socket, SOL_SOCKET, SO_SNDTIMEO, array('sec' => $socksndtimeoutsec, 'usec' => 0)); socket_set_option($socket, SOL_SOCKET, SO_RCVTIMEO, array('sec' => $sockrcvtimeoutsec, 'usec' => 0)); $res = socket_connect($socket, $addr, $port); if ($res === false) { $haderror = true; if ($rigipsecurity === false) { $error = socket_strerror(socket_last_error()); $msg = "socket connect($addr,$port) failed"; $error = "ERR: $msg '$error'\n"; } else $error = "ERR: socket connect($rig) failed\n"; socket_close($socket); return null; } return $socket; } # function readsockline($socket) { $line = ''; while (true) { $byte = socket_read($socket, 1); if ($byte === false || $byte === '') break; if ($byte === "\0") break; $line .= $byte; } return $line; } # function api_convert_escape($str) { $res = ''; $len = strlen($str); for ($i = 0; $i < $len; $i++) { $ch = substr($str, $i, 1); if ($ch != '\\' || $i == ($len-1)) $res .= $ch; else { $i++; $ch = substr($str, $i, 1); switch ($ch) { case '|': $res .= "\1"; break; case '\\': $res .= "\2"; break; case '=': $res .= "\3"; break; case ',': $res .= "\4"; break; default: $res .= $ch; } } } return $res; } # function revert($str) { return str_replace(array("\1", "\2", "\3", "\4"), array("|", "\\", "=", ","), $str); } # function api($rig, $cmd) { global $haderror, $error; global $miner, $port, $hidefields; global $per_proc; if ($per_proc) { $cmd = preg_replace('/^devs\b/', 'procs', $cmd); $cmd = preg_replace('/^pga/', 'proc', $cmd); } $socket = getsock($rig, $miner, $port); if ($socket != null) { socket_write($socket, $cmd, strlen($cmd)); $line = readsockline($socket); socket_close($socket); if (strlen($line) 
== 0) { $haderror = true; $error = "WARN: '$cmd' returned nothing\n"; return $line; } # print "$cmd returned '$line'\n"; $line = api_convert_escape($line); $data = array(); $objs = explode('|', $line); foreach ($objs as $obj) { if (strlen($obj) > 0) { $items = explode(',', $obj); $item = $items[0]; $id = explode('=', $items[0], 2); if (count($id) == 1 or !ctype_digit($id[1])) $name = $id[0]; else $name = $id[0].$id[1]; if (strlen($name) == 0) $name = 'null'; $sectionname = preg_replace('/\d/', '', $name); if (isset($data[$name])) { $num = 1; while (isset($data[$name.$num])) $num++; $name .= $num; } $counter = 0; foreach ($items as $item) { $id = explode('=', $item, 2); if (isset($hidefields[$sectionname.'.'.$id[0]])) continue; if (count($id) == 2) $data[$name][$id[0]] = revert($id[1]); else $data[$name][$counter] = $id[0]; $counter++; } } } return $data; } return null; } # function getparam($name, $both = false) { $a = null; if (isset($_POST[$name])) $a = $_POST[$name]; if (($both === true) and ($a === null)) { if (isset($_GET[$name])) $a = $_GET[$name]; } if ($a == '' || $a == null) return null; // limit to 1K just to be safe return substr($a, 0, 1024); } # function newtable() { global $tablebegin, $rownum; echo $tablebegin; $rownum = 0; } # function newrow() { echo ''; } # function othrow($row) { return "$row"; } # function otherrow($row) { echo othrow($row); } # function endrow() { global $rownum; echo ''; $rownum++; } # function endtable() { global $tableend; echo $tableend; } # function classlastshare($when, $alldata, $warnclass, $errorclass) { global $checklastshare; if ($checklastshare === false) return ''; if ($when == 0) return ''; if (!isset($alldata['MHS av'])) return ''; if ($alldata['MHS av'] == 0) return ''; if (!isset($alldata['Last Share Time'])) return ''; if (!isset($alldata['Last Share Difficulty'])) return ''; $expected = pow(2, 32) / ($alldata['MHS av'] * pow(10, 6)); // If the share difficulty changes while waiting on a share, // this calculation will of course be incorrect $expected *= $alldata['Last Share Difficulty']; $howlong = $when - $alldata['Last Share Time']; if ($howlong < 1) $howlong = 1; if ($howlong > ($expected * 12)) return $errorclass; if ($howlong > ($expected * 8)) return $warnclass; return ''; } # function endzero($num) { $rep = preg_replace('/0*$/', '', $num); if ($rep === '') $rep = '0'; return $rep; } # function fmt($section, $name, $value, $when, $alldata) { global $dfmt, $rownum; if ($alldata == null) $alldata = array(); $errorclass = ' class=err'; $warnclass = ' class=warn'; $lstclass = ' class=lst'; $hiclass = ' class=hi'; $loclass = ' class=lo'; $c2class = ' class=two'; $totclass = ' class=tot'; $b = ' '; $ret = $value; $class = ''; $nams = explode('.', $name); if (count($nams) > 1) $name = $nams[count($nams)-1]; if ($value === null) $ret = $b; else switch ($section.'.'.$name) { case 'GPU.Last Share Time': case 'PGA.Last Share Time': case 'DEVS.Last Share Time': if ($value == 0 || (isset($alldata['Last Share Pool']) && $alldata['Last Share Pool'] == -1)) { $ret = 'Never'; $class = $warnclass; } else { $ret = date('H:i:s', $value); $class = classlastshare($when, $alldata, $warnclass, $errorclass); } break; case 'GPU.Last Valid Work': case 'PGA.Last Valid Work': case 'DEVS.Last Valid Work': if ($value == 0) $ret = 'Never'; else $ret = ($value - $when) . 
's'; break; case 'POOL.Last Share Time': if ($value == 0) $ret = 'Never'; else $ret = date('H:i:s d-M', $value); break; case 'GPU.Last Share Pool': case 'PGA.Last Share Pool': case 'DEVS.Last Share Pool': if ($value == -1) { $ret = 'None'; $class = $warnclass; } break; case 'SUMMARY.Elapsed': case 'STATS.Elapsed': $s = $value % 60; $value -= $s; $value /= 60; if ($value == 0) $ret = $s.'s'; else { $m = $value % 60; $value -= $m; $value /= 60; if ($value == 0) $ret = sprintf("%dm$b%02ds", $m, $s); else { $h = $value % 24; $value -= $h; $value /= 24; if ($value == 0) $ret = sprintf("%dh$b%02dm$b%02ds", $h, $m, $s); else { if ($value == 1) $days = ''; else $days = 's'; $ret = sprintf("%dday$days$b%02dh$b%02dm$b%02ds", $value, $h, $m, $s); } } } break; case 'NOTIFY.Last Well': if ($value == '0') { $ret = 'Never'; $class = $warnclass; } else $ret = date('H:i:s', $value); break; case 'NOTIFY.Last Not Well': if ($value == '0') $ret = 'Never'; else { $ret = date('H:i:s', $value); $class = $errorclass; } break; case 'NOTIFY.Reason Not Well': if ($value != 'None') $class = $errorclass; break; case 'GPU.Utility': case 'PGA.Utility': case 'DEVS.Utility': case 'SUMMARY.Utility': case 'total.Utility': $ret = $value.'/m'; if ($value == 0) $class = $errorclass; else if (isset($alldata['Difficulty Accepted']) && isset($alldata['Accepted']) && isset($alldata['MHS av']) && ($alldata['Difficulty Accepted'] > 0) && ($alldata['Accepted'] > 0)) { $expected = 60 * $alldata['MHS av'] * (pow(10, 6) / pow(2, 32)); if ($expected == 0) $expected = 0.000001; // 1 H/s $da = $alldata['Difficulty Accepted']; $a = $alldata['Accepted']; $expected /= ($da / $a); $ratio = $value / $expected; if ($ratio < 0.9) $class = $loclass; else if ($ratio > 1.1) $class = $hiclass; } break; case 'PGA.Temperature': case 'GPU.Temperature': case 'DEVS.Temperature': $ret = $value.'°C'; if (!isset($alldata['GPU'])) { if ($value == 0) $ret = ' '; break; } case 'GPU.GPU Clock': case 'DEVS.GPU Clock': case 'GPU.Memory Clock': case 'DEVS.Memory Clock': case 'GPU.GPU Voltage': case 'DEVS.GPU Voltage': case 'GPU.GPU Activity': case 'DEVS.GPU Activity': if ($value == 0) $class = $warnclass; break; case 'GPU.Fan Percent': case 'DEVS.Fan Percent': if ($value == 0) $class = $warnclass; else { if ($value == 100) $class = $errorclass; else if ($value > 85) $class = $warnclass; } break; case 'GPU.Fan Speed': case 'DEVS.Fan Speed': if ($value == 0) $class = $warnclass; else if (isset($alldata['Fan Percent'])) { $test = $alldata['Fan Percent']; if ($test == 100) $class = $errorclass; else if ($test > 85) $class = $warnclass; } break; case 'GPU.MHS av': case 'PGA.MHS av': case 'DEVS.MHS av': case 'SUMMARY.MHS av': case 'total.MHS av': $parts = explode('.', $value, 2); if (count($parts) == 1) $dec = ''; else $dec = '.'.$parts[1]; $ret = number_format((float)$parts[0]).$dec; if ($value == 0) $class = $errorclass; else if (isset($alldata['Difficulty Accepted']) && isset($alldata['Accepted']) && isset($alldata['Utility']) && ($alldata['Difficulty Accepted'] > 0) && ($alldata['Accepted'] > 0)) { $expected = 60 * $value * (pow(10, 6) / pow(2, 32)); if ($expected == 0) $expected = 0.000001; // 1 H/s $da = $alldata['Difficulty Accepted']; $a = $alldata['Accepted']; $expected /= ($da / $a); $ratio = $alldata['Utility'] / $expected; if ($ratio < 0.9) $class = $hiclass; else if ($ratio > 1.1) $class = $loclass; } break; case 'GPU.Total MH': case 'PGA.Total MH': case 'DEVS.Total MH': case 'SUMMARY.Total MH': case 'total.Total MH': case 'SUMMARY.Getworks': case 
'POOL.Getworks': case 'total.Getworks': case 'GPU.Accepted': case 'PGA.Accepted': case 'DEVS.Accepted': case 'SUMMARY.Accepted': case 'POOL.Accepted': case 'total.Accepted': case 'GPU.Rejected': case 'PGA.Rejected': case 'DEVS.Rejected': case 'SUMMARY.Rejected': case 'POOL.Rejected': case 'total.Rejected': case 'SUMMARY.Local Work': case 'total.Local Work': case 'SUMMARY.Discarded': case 'POOL.Discarded': case 'total.Discarded': case 'POOL.Diff1 Shares': case 'total.Diff1 Shares': case 'GPU.Diff1 Work': case 'PGA.Diff1 Work': case 'total.Diff1 Work': case 'STATS.Times Sent': case 'STATS.Bytes Sent': case 'STATS.Net Bytes Sent': case 'STATS.Times Recv': case 'STATS.Bytes Recv': case 'STATS.Net Bytes Recv': case 'total.Times Sent': case 'total.Bytes Sent': case 'total.Net Bytes Sent': case 'total.Times Recv': case 'total.Bytes Recv': case 'total.Net Bytes Recv': $parts = explode('.', $value, 2); if (count($parts) == 1) $dec = ''; else $dec = '.'.$parts[1]; $ret = number_format((float)$parts[0]).$dec; break; case 'STATS.Hs': case 'STATS.W': case 'STATS.history_time': case 'STATS.Pool Wait': case 'STATS.Pool Max': case 'STATS.Pool Min': case 'STATS.Pool Av': case 'STATS.Min Diff': case 'STATS.Max Diff': case 'STATS.Work Diff': $parts = explode('.', $value, 2); if (count($parts) == 1) $dec = ''; else $dec = '.'.endzero($parts[1]); $ret = number_format((float)$parts[0]).$dec; break; case 'GPU.Status': case 'PGA.Status': case 'DEVS.Status': case 'POOL.Status': if ($value != 'Alive') $class = $errorclass; break; case 'GPU.Enabled': case 'PGA.Enabled': case 'DEVS.Enabled': if ($value != 'Y') $class = $warnclass; break; case 'STATUS.When': case 'COIN.Current Block Time': $ret = date($dfmt, $value); break; case 'BUTTON.Rig': case 'BUTTON.Pool': case 'BUTTON.GPU': $ret = $value; break; case 'SUMMARY.Difficulty Accepted': case 'GPU.Difficulty Accepted': case 'PGA.Difficulty Accepted': case 'DEVS.Difficulty Accepted': case 'POOL.Difficulty Accepted': case 'total.Difficulty Accepted': case 'SUMMARY.Difficulty Rejected': case 'GPU.Difficulty Rejected': case 'PGA.Difficulty Rejected': case 'DEVS.Difficulty Rejected': case 'POOL.Difficulty Rejected': case 'total.Difficulty Rejected': case 'SUMMARY.Difficulty Stale': case 'POOL.Difficulty Stale': case 'total.Difficulty Stale': case 'GPU.Last Share Difficulty': case 'PGA.Last Share Difficulty': case 'DEVS.Last Share Difficulty': case 'POOL.Last Share Difficulty': if ($value != '') $ret = number_format((float)$value, 2); break; case 'DEVS.Device Hardware%': case 'DEVS.Device Rejected%': case 'PGA.Device Hardware%': case 'PGA.Device Rejected%': case 'GPU.Device Hardware%': case 'GPU.Device Rejected%': case 'POOL.Pool Rejected%': case 'POOL.Pool Stale%': case 'SUMMARY.Device Hardware%': case 'SUMMARY.Device Rejected%': case 'SUMMARY.Pool Rejected%': case 'SUMMARY.Pool Stale%': if ($value != '') $ret = number_format((float)$value, 2) . 
'%'; break; case 'SUMMARY.Best Share': if ($value != '') $ret = number_format((float)$value); break; } if ($section == 'NOTIFY' && substr($name, 0, 1) == '*' && $value != '0') $class = $errorclass; if ($class == '' && $section != 'POOL') $class = classlastshare($when, $alldata, $lstclass, $lstclass); if ($class == '' && $section == 'total') $class = $totclass; if ($class == '' && ($rownum % 2) == 0) $class = $c2class; if ($ret === '') $ret = $b; return array($ret, $class); } # global $poolcmd; $poolcmd = array( 'Switch to' => 'switchpool', 'Enable' => 'enablepool', 'Disable' => 'disablepool', 'Remove' => 'removepool' ); # function showhead($cmd, $values, $justnames = false) { global $poolcmd, $readonly; newrow(); foreach ($values as $name => $value) { if ($name == '0' or $name == '') $name = ' '; echo ""; } if ($justnames === false && $cmd == 'pools' && $readonly === false) foreach ($poolcmd as $name => $pcmd) echo ""; endrow(); } # function showdatetime() { global $dfmt; otherrow(''); } # global $singlerigsum; $singlerigsum = array( 'devs' => array('MHS av' => 1, 'MHS 5s' => 1, 'Accepted' => 1, 'Rejected' => 1, 'Temperature' => 2, 'Hardware Errors' => 1, 'Utility' => 1, 'Total MH' => 1), 'pools' => array('Getworks' => 1, 'Accepted' => 1, 'Rejected' => 1, 'Discarded' => 1, 'Stale' => 1, 'Get Failures' => 1, 'Remote Failures' => 1), 'notify' => array('*' => 1)); # function showtotal($total, $when, $oldvalues) { global $rigtotals; list($showvalue, $class) = fmt('total', '', 'Total:', $when, null); echo "$showvalue"; $skipfirst = true; foreach ($oldvalues as $name => $value) { if ($skipfirst === true) { $skipfirst = false; continue; } if (isset($total[$name])) $newvalue = $total[$name]; else $newvalue = ''; list($showvalue, $class) = fmt('total', $name, $newvalue, $when, null); echo "$showvalue"; } } # function details($cmd, $list, $rig) { global $dfmt, $poolcmd, $readonly, $showndate; global $rownum, $rigtotals, $forcerigtotals, $singlerigsum; $when = 0; $stas = array('S' => 'Success', 'W' => 'Warning', 'I' => 'Informational', 'E' => 'Error', 'F' => 'Fatal'); newtable(); if ($showndate === false) { showdatetime(); endtable(); newtable(); $showndate = true; } if (isset($list['STATUS'])) { newrow(); echo ''; if (isset($list['STATUS']['When'])) { echo ''; $when = $list['STATUS']['When']; } $sta = $list['STATUS']['STATUS']; echo ''; echo ''; endrow(); } if ($rigtotals === true && isset($singlerigsum[$cmd])) $dototal = $singlerigsum[$cmd]; else $dototal = array(); $total = array(); $section = ''; $oldvalues = null; // Build a common row column for all entries $columns = array(); $columnsByIndex = array(); foreach ($list as $item => $values) { if ($item == 'STATUS') continue; if (isset($values['ID'])) { $repr = $values['Name'].$values['ID']; if (isset($values['ProcID'])) $repr .= join_get_field('ProcID', $values); $list[$item] = $values = array('Device' => $repr) + array_slice($values, 1); unset($values['Name']); unset($values['ID']); unset($values['ProcID']); } $namesByIndex = array_keys($values); $nameCount = count($namesByIndex); for ($i = 0; $i < $nameCount; ++$i) { $name = $namesByIndex[$i]; if (isset($columns[$name])) continue; $value = $values[$name]; $before = null; for ($j = $i + 1; $j < $nameCount; ++$j) { $maybebefore = $namesByIndex[$j]; if (isset($columns[$maybebefore])) { $before = $columns[$maybebefore]; break; } } if (!$before) { $columns[$name] = array_push($columnsByIndex, $name) - 1; continue; } array_splice($columnsByIndex, $before, 0, $name); $columns[$name] = $before; 
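// array_splice() has just inserted the new column name into $columnsByIndex
// at position $before, shifting every later column one slot to the right;
// the loop below rebuilds those entries in $columns so the name-to-index
// map stays consistent with $columnsByIndex.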
$columnCount = count($columnsByIndex); for ($j = $before + 1; $j < $columnCount; ++$j) $columns[$columnsByIndex[$j]] = $j; } } asort($columns); endtable(); newtable(); showhead($cmd, $columns); foreach ($list as $item => $values) { if ($item == 'STATUS') continue; newrow(); foreach ($columns as $name => $columnidx) { if (!isset($values[$name])) { echo ''; continue; } $value = $values[$name]; list($showvalue, $class) = fmt($section, $name, $value, $when, $values); echo "$showvalue"; if (isset($dototal[$name]) || (isset($dototal['*']) and substr($name, 0, 1) == '*')) { if (isset($total[$name])) { if (isset($dototal[$name]) && $dototal[$name] == 2) $total[$name] = max($total[$name], $value); else $total[$name] += $value; } else $total[$name] = $value; } } if ($cmd == 'pools' && $readonly === false) { reset($values); $pool = current($values); foreach ($poolcmd as $name => $pcmd) { list($ignore, $class) = fmt('BUTTON', 'Pool', '', $when, $values); echo ""; if ($pool === false) echo ' '; else { echo ""; } echo ''; } } endrow(); $oldvalues = $values; } if ($oldvalues != null && count($total) > 0 && ($rownum > 2 || $forcerigtotals === true)) showtotal($total, $when, $columns); endtable(); } # global $devs; $devs = null; # function gpubuttons($count, $rig) { global $devs; $basic = array( 'GPU', 'Enable', 'Disable', 'Restart' ); $options = array( 'intensity' => 'Intensity', 'fan' => 'Fan Percent', 'engine' => 'GPU Clock', 'mem' => 'Memory Clock', 'vddc' => 'GPU Voltage' ); newtable(); newrow(); foreach ($basic as $head) echo ""; foreach ($options as $name => $des) echo ""; $n = 0; for ($c = 0; $c < $count; $c++) { endrow(); newrow(); foreach ($basic as $name) { list($ignore, $class) = fmt('BUTTON', 'GPU', '', 0, null); echo ""; if ($name == 'GPU') echo $c; else { echo ""; } echo ''; } foreach ($options as $name => $des) { list($ignore, $class) = fmt('BUTTON', 'GPU', '', 0, null); echo ""; if (!isset($devs["GPU$c"][$des])) echo ' '; else { $value = $devs["GPU$c"][$des]; echo ""; echo ""; $n++; } echo ''; } } endrow(); endtable(); } # function processgpus($rig) { global $error; global $warnfont, $warnoff; $gpus = api($rig, 'gpucount'); if ($error != null) otherrow(""); else { if (!isset($gpus['GPUS']['Count'])) { $rw = ''; otherrow($rw); } else { $count = $gpus['GPUS']['Count']; if ($count == 0) otherrow(''); else gpubuttons($count, $rig); } } } # function showpoolinputs($rig, $ans) { global $readonly, $poolinputs; if ($readonly === true || $poolinputs === false) return; newtable(); newrow(); $inps = array('Pool URL' => array('purl', 20), 'Worker Name' => array('pwork', 10), 'Worker Password' => array('ppass', 10)); $b = ' '; echo ""; endrow(); if (count($ans) > 1) { newrow(); echo ''; echo ""; endrow(); } endtable(); } # function process($cmds, $rig) { global $error, $devs; global $warnfont, $warnoff; $count = count($cmds); foreach ($cmds as $cmd => $des) { $process = api($rig, $cmd); if ($error != null) { otherrow(""); break; } else { details($cmd, $process, $rig); if ($cmd == 'devs') $devs = $process; if ($cmd == 'pools') showpoolinputs($rig, $process); # Not after the last one if (--$count > 0) otherrow(''); } } } # function rigname($rig, $rigname) { global $rigs; if (isset($rigs[$rig])) { $parts = explode(':', $rigs[$rig], 3); if (count($parts) == 3) $rigname = $parts[2]; } return $rigname; } # function riginput($rig, $rigname) { $rigname = rigname($rig, $rigname); return ""; } # function rigbutton($rig, $rigname, $when, $row) { list($value, $class) = fmt('BUTTON', 'Rig', '', $when, $row); if 
($rig === '') $ri = ' '; else $ri = riginput($rig, $rigname); return ""; } # function showrigs($anss, $headname, $rigname) { $dthead = array($headname => 1, 'STATUS' => 1, 'Description' => 1, 'When' => 1, 'API' => 1, 'CGMiner' => 1); showhead('', $dthead); foreach ($anss as $rig => $ans) { if ($ans == null) continue; newrow(); $when = 0; if (isset($ans['STATUS']['When'])) $when = $ans['STATUS']['When']; foreach ($ans as $item => $row) { if ($item != 'STATUS' && $item != 'VERSION') continue; foreach ($dthead as $name => $x) { if ($item == 'STATUS' && $name == $headname) echo rigbutton($rig, $rigname.$rig, $when, null); else { if (isset($row[$name])) { list($showvalue, $class) = fmt('STATUS', $name, $row[$name], $when, null); echo "$showvalue"; } } } } endrow(); } } # # $head is a hack but this is just a demo anyway :) function doforeach($cmd, $des, $sum, $head, $datetime) { global $miner, $port; global $error, $readonly, $notify, $rigs; global $warnfont, $warnoff, $dfmt; global $rigerror; $when = 0; $header = $head; $anss = array(); $count = 0; $preverr = count($rigerror); foreach ($rigs as $num => $rig) { $anss[$num] = null; if (isset($rigerror[$rig])) continue; $parts = explode(':', $rig, 3); if (count($parts) >= 2) { $miner = $parts[0]; $port = $parts[1]; if (count($parts) > 2) $name = $parts[2]; else $name = $num; $ans = api($name, $cmd); if ($error != null) { $rw = ""; otherrow($rw); $rigerror[$rig] = $error; $error = null; } else { $anss[$num] = $ans; $count++; } } } if ($count == 0) { $rw = ''; otherrow($rw); return; } if ($datetime) { showdatetime(); endtable(); newtable(); showrigs($anss, '', 'Rig '); endtable(); otherrow(''); newtable(); return; } $total = array(); foreach ($anss as $rig => $ans) { if ($ans == null) continue; foreach ($ans as $item => $row) { if ($item == 'STATUS') continue; if (count($row) > count($header)) { $header = $head; foreach ($row as $name => $value) if (!isset($header[$name])) $header[$name] = ''; } if ($sum != null) foreach ($sum as $name) { if (isset($row[$name])) { if (isset($total[$name])) $total[$name] += $row[$name]; else $total[$name] = $row[$name]; } } } } if ($sum != null) $anss['total']['total'] = $total; showhead('', $header); foreach ($anss as $rig => $ans) { if ($ans == null) continue; $when = 0; if (isset($ans['STATUS']['When'])) $when = $ans['STATUS']['When']; foreach ($ans as $item => $row) { if ($item == 'STATUS') continue; newrow(); $section = preg_replace('/\d/', '', $item); foreach ($header as $name => $x) { if ($name == '') { if ($rig === 'total') { list($ignore, $class) = fmt($rig, '', '', $when, $row); echo ""; } else echo rigbutton($rig, "Rig $rig", $when, $row); } else { if (isset($row[$name])) $value = $row[$name]; else $value = null; list($showvalue, $class) = fmt($section, $name, $value, $when, $row); echo "$showvalue"; } } endrow(); } } } # function refreshbuttons() { global $ignorerefresh, $changerefresh, $autorefresh; if ($ignorerefresh == false && $changerefresh == true) { echo '    '; echo ""; echo ""; echo ""; } } # function pagebuttons($rig, $pg) { global $readonly, $rigs, $userlist, $ses; global $allowcustompages, $customsummarypages; if ($rig === null) { $prev = null; $next = null; if ($pg === null) $refresh = ''; else $refresh = "&pg=$pg"; } else { switch (count($rigs)) { case 0: case 1: $prev = null; $next = null; break; case 2: $prev = null; $next = ($rig + 1) % count($rigs); break; default: $prev = ($rig - 1) % count($rigs); $next = ($rig + 1) % count($rigs); break; } $refresh = "&rig=$rig"; } echo '"; } # 
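# Illustrative sketch only: the $rigs entries parsed by rigname(), getsock()
# and doforeach() above are colon-separated "host:port" strings with an
# optional ":name" suffix (multicast discovery builds the same shape from
# "$ip:$rp:$mdes").  A hand-written configuration might look like:
#   $rigs = array('127.0.0.1:4028:Local', '192.168.0.20:4028');
# The hosts, the port 4028 and the 'Local' label are assumed example values
# here, not anything defined elsewhere in this file.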
function doOne($rig, $preprocess) { global $haderror, $readonly, $notify, $rigs; global $placebuttons; if ($placebuttons == 'top' || $placebuttons == 'both') pagebuttons($rig, null); if ($preprocess != null) process(array($preprocess => $preprocess), $rig); $cmds = array( 'devs' => 'device list', 'summary' => 'summary information', 'pools' => 'pool list'); if ($notify) $cmds['notify'] = 'device status'; $cmds['config'] = 'BFGMiner config'; process($cmds, $rig); if ($haderror == false && $readonly === false) processgpus($rig); if ($placebuttons == 'bot' || $placebuttons == 'both') pagebuttons($rig, null); } # global $sectionmap; # map sections to their api command # DEVS is a special case that will match GPU or PGA # so you can have a single table with both in it # DATE is hard coded so not in here $sectionmap = array( 'RIGS' => 'version', 'SUMMARY' => 'summary', 'POOL' => 'pools', 'DEVS' => 'devs', 'GPU' => 'devs', // You would normally use DEVS 'PGA' => 'devs', // You would normally use DEVS 'NOTIFY' => 'notify', 'DEVDETAILS' => 'devdetails', 'STATS' => 'stats', 'CONFIG' => 'config', 'COIN' => 'coin'); # function joinfields($section1, $section2, $join, $results) { global $sectionmap; $name1 = $sectionmap[$section1]; $name2 = $sectionmap[$section2]; $newres = array(); // foreach rig in section1 foreach ($results[$name1] as $rig => $result) { $status = null; // foreach answer section in the rig api call foreach ($result as $name1b => $fields1b) { if ($name1b == 'STATUS') { // remember the STATUS from section1 $status = $result[$name1b]; continue; } // foreach answer section in the rig api call (for the other api command) foreach ($results[$name2][$rig] as $name2b => $fields2b) { if ($name2b == 'STATUS') continue; // If match the same field values of fields in $join $match = true; foreach ($join as $field) if ($fields1b[$field] != $fields2b[$field]) { $match = false; break; } if ($match === true) { if ($status != null) { $newres[$rig]['STATUS'] = $status; $status = null; } $subsection = $section1.'+'.$section2; $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); foreach ($fields1b as $nam => $val) $newres[$rig][$subsection]["$section1.$nam"] = $val; foreach ($fields2b as $nam => $val) $newres[$rig][$subsection]["$section2.$nam"] = $val; } } } } return $newres; } # function join_get_field($field, $fields) { // : means a string constant otherwise it's a field name // ProcID field name is converted to a lowercase letter if (substr($field, 0, 1) == ':') return substr($field, 1); else if ($field == 'ProcID') return chr(97 + $fields[$field]); else return $fields[$field]; } # function joinlr($section1, $section2, $join, $results) { global $sectionmap; $name1 = $sectionmap[$section1]; $name2 = $sectionmap[$section2]; $newres = array(); // foreach rig in section1 foreach ($results[$name1] as $rig => $result) { $status = null; // foreach answer section in the rig api call foreach ($result as $name1b => $fields1b) { if ($name1b == 'STATUS') { // remember the STATUS from section1 $status = $result[$name1b]; continue; } // Build L string to be matched $Lval = ''; foreach ($join['L'] as $field) $Lval .= join_get_field($field, $fields1b); // foreach answer section in the rig api call (for the other api command) foreach ($results[$name2][$rig] as $name2b => $fields2b) { if ($name2b == 'STATUS') continue; // Build R string and compare $Rval = ''; foreach ($join['R'] as $field) $Rval .= join_get_field($field, $fields2b); if ($Lval === $Rval) { if ($status != null) { $newres[$rig]['STATUS'] = 
$status; $status = null; } $subsection = $section1.'+'.$section2; $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); foreach ($fields1b as $nam => $val) $newres[$rig][$subsection]["$section1.$nam"] = $val; foreach ($fields2b as $nam => $val) $newres[$rig][$subsection]["$section2.$nam"] = $val; } } } } return $newres; } # function joinall($section1, $section2, $results) { global $sectionmap; $name1 = $sectionmap[$section1]; $name2 = $sectionmap[$section2]; $newres = array(); // foreach rig in section1 foreach ($results[$name1] as $rig => $result) { // foreach answer section in the rig api call foreach ($result as $name1b => $fields1b) { if ($name1b == 'STATUS') { // copy the STATUS from section1 $newres[$rig][$name1b] = $result[$name1b]; continue; } // foreach answer section in the rig api call (for the other api command) foreach ($results[$name2][$rig] as $name2b => $fields2b) { if ($name2b == 'STATUS') continue; $subsection = $section1.'+'.$section2; $subsection .= preg_replace('/[^0-9]/', '', $name1b.$name2b); foreach ($fields1b as $nam => $val) $newres[$rig][$subsection]["$section1.$nam"] = $val; foreach ($fields2b as $nam => $val) $newres[$rig][$subsection]["$section2.$nam"] = $val; } } } return $newres; } # function joinsections($sections, $results, $errors) { global $sectionmap; // GPU's don't have Name,ID,ProcID fields - so create them foreach ($results as $section => $res) foreach ($res as $rig => $result) foreach ($result as $name => $fields) { $subname = preg_replace('/[0-9]/', '', $name); if ($subname == 'GPU' and isset($result[$name]['GPU'])) { $results[$section][$rig][$name]['Name'] = 'GPU'; $results[$section][$rig][$name]['ID'] = $result[$name]['GPU']; $results[$section][$rig][$name]['ProcID'] = 0; } } foreach ($sections as $section => $fields) if ($section != 'DATE' && !isset($sectionmap[$section])) { $both = explode('+', $section, 2); if (count($both) > 1) { switch($both[0]) { case 'SUMMARY': switch($both[1]) { case 'POOL': case 'DEVS': case 'CONFIG': case 'COIN': $sectionmap[$section] = $section; $results[$section] = joinall($both[0], $both[1], $results); break; default: $errors[] = "Error: Invalid section '$section'"; break; } break; case 'DEVS': switch($both[1]) { case 'NOTIFY': case 'DEVDETAILS': case 'USBSTATS': $join = array('Name', 'ID', 'ProcID'); $sectionmap[$section] = $section; $results[$section] = joinfields($both[0], $both[1], $join, $results); break; case 'STATS': $join = array('L' => array('Name','ID','ProcID'), 'R' => array('ID')); $sectionmap[$section] = $section; $results[$section] = joinlr($both[0], $both[1], $join, $results); break; default: $errors[] = "Error: Invalid section '$section'"; break; } break; case 'POOL': switch($both[1]) { case 'STATS': $join = array('L' => array(':POOL','POOL'), 'R' => array('ID')); $sectionmap[$section] = $section; $results[$section] = joinlr($both[0], $both[1], $join, $results); break; default: $errors[] = "Error: Invalid section '$section'"; break; } break; default: $errors[] = "Error: Invalid section '$section'"; break; } } else $errors[] = "Error: Invalid section '$section'"; } return array($results, $errors); } # function secmatch($section, $field) { if ($section == $field) return true; if ($section == 'DEVS' && ($field == 'GPU' || $field == 'PGA')) return true; return false; } # function customset($showfields, $sum, $section, $rig, $isbutton, $result, $total) { foreach ($result as $sec => $row) { $secname = preg_replace('/\d/', '', $sec); if ($sec != 'total') if (!secmatch($section, $secname)) continue; 
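// Render one table row for this matching section: a rig button (or a plain
// leading cell for the synthetic 'total' row), then one cell per field in
// $showfields; any field also named in $sum[$section] is accumulated into
// $total as the row is emitted.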
newrow(); $when = 0; if (isset($result['STATUS']['When'])) $when = $result['STATUS']['When']; if ($isbutton) echo rigbutton($rig, $rig, $when, $row); else { list($ignore, $class) = fmt('total', '', '', $when, $row); echo ""; } foreach ($showfields as $name => $one) { if (isset($row[$name])) { $value = $row[$name]; if (isset($sum[$section][$name])) { if (isset($total[$name])) $total[$name] += $value; else $total[$name] = $value; } } else { if ($sec == 'total' && isset($total[$name])) $value = $total[$name]; else $value = null; } if (strpos($secname, '+') === false) list($showvalue, $class) = fmt($secname, $name, $value, $when, $row); else { $parts = explode('.', $name, 2); list($showvalue, $class) = fmt($parts[0], $parts[1], $value, $when, $row); } echo "$showvalue"; } endrow(); } return $total; } # function docalc($func, $data) { switch ($func) { case 'sum': $tot = 0; foreach ($data as $val) $tot += $val; return $tot; case 'avg': $tot = 0; foreach ($data as $val) $tot += $val; return ($tot / count($data)); case 'min': $ans = null; foreach ($data as $val) if ($ans === null) $ans = $val; else if ($val < $ans) $ans = $val; return $ans; case 'max': $ans = null; foreach ($data as $val) if ($ans === null) $ans = $val; else if ($val > $ans) $ans = $val; return $ans; case 'lo': $ans = null; foreach ($data as $val) if ($ans === null) $ans = $val; else if (strcasecmp($val, $ans) < 0) $ans = $val; return $ans; case 'hi': $ans = null; foreach ($data as $val) if ($ans === null) $ans = $val; else if (strcasecmp($val, $ans) > 0) $ans = $val; return $ans; case 'count': return count($data); case 'any': default: return $data[0]; } } # function docompare($row, $test) { // invalid $test data means true if (count($test) < 2) return true; if (isset($row[$test[0]])) $val = $row[$test[0]]; else $val = null; if ($test[1] == 'set') return ($val !== null); if ($val === null || count($test) < 3) return true; switch($test[1]) { case '=': return ($val == $test[2]); case '<': return ($val < $test[2]); case '<=': return ($val <= $test[2]); case '>': return ($val > $test[2]); case '>=': return ($val >= $test[2]); case 'eq': return (strcasecmp($val, $test[2]) == 0); case 'lt': return (strcasecmp($val, $test[2]) < 0); case 'le': return (strcasecmp($val, $test[2]) <= 0); case 'gt': return (strcasecmp($val, $test[2]) > 0); case 'ge': return (strcasecmp($val, $test[2]) >= 0); default: return true; } } # function processcompare($which, $ext, $section, $res) { if (isset($ext[$section][$which])) { $proc = $ext[$section][$which]; if ($proc !== null) { $res2 = array(); foreach ($res as $rig => $result) foreach ($result as $sec => $row) { $secname = preg_replace('/\d/', '', $sec); if (!secmatch($section, $secname)) $res2[$rig][$sec] = $row; else { $keep = true; foreach ($proc as $test) if (!docompare($row, $test)) { $keep = false; break; } if ($keep) $res2[$rig][$sec] = $row; } } $res = $res2; } } return $res; } # function ss($a, $b) { $la = strlen($a); $lb = strlen($b); if ($la != $lb) return $lb - $la; return strcmp($a, $b); } # function genfld($row, $calc) { uksort($row, "ss"); foreach ($row as $name => $value) if (strstr($calc, $name) !== FALSE) $calc = str_replace($name, $value, $calc); eval("\$val = $calc;"); return $val; } # function dogen($ext, $section, &$res, &$fields) { $gen = $ext[$section]['gen']; foreach ($gen as $fld => $calc) $fields[] = "GEN.$fld"; foreach ($res as $rig => $result) foreach ($result as $sec => $row) { $secname = preg_replace('/\d/', '', $sec); if (secmatch($section, $secname)) foreach ($gen as $fld 
=> $calc) { $name = "GEN.$fld"; $val = genfld($row, $calc); $res[$rig][$sec][$name] = $val; } } } # function processext($ext, $section, $res, &$fields) { global $allowgen; $res = processcompare('where', $ext, $section, $res); if (isset($ext[$section]['group'])) { $grp = $ext[$section]['group']; $calc = $ext[$section]['calc']; if ($grp !== null) { $interim = array(); $res2 = array(); $cou = 0; foreach ($res as $rig => $result) foreach ($result as $sec => $row) { $secname = preg_replace('/\d/', '', $sec); if (!secmatch($section, $secname)) { // STATUS may be problematic ... if (!isset($res2[$sec])) $res2[$sec] = $row; } else { $grpkey = ''; $newrow = array(); foreach ($grp as $field) { if (isset($row[$field])) { $grpkey .= $row[$field].'.'; $newrow[$field] = $row[$field]; } else $grpkey .= '.'; } if (!isset($interim[$grpkey])) { $interim[$grpkey]['grp'] = $newrow; $interim[$grpkey]['sec'] = $secname.$cou; $cou++; } if ($calc !== null) foreach ($calc as $field => $func) { if (!isset($interim[$grpkey]['cal'][$field])) $interim[$grpkey]['cal'][$field] = array(); $interim[$grpkey]['cal'][$field][] = $row[$field]; } } } // Build the rest of $res2 from $interim foreach ($interim as $rowkey => $row) { $key = $row['sec']; foreach ($row['grp'] as $field => $value) $res2[$key][$field] = $value; foreach ($row['cal'] as $field => $data) $res2[$key][$field] = docalc($calc[$field], $data); } $res = array('' => $res2); } } // Generated fields (functions of other fields) if ($allowgen === true && isset($ext[$section]['gen'])) dogen($ext, $section, $res, $fields); return processcompare('having', $ext, $section, $res); } # function processcustompage($pagename, $sections, $sum, $ext, $namemap) { global $sectionmap; global $miner, $port; global $rigs, $error; global $warnfont, $warnoff; global $dfmt; global $readonly, $showndate; $cmds = array(); $errors = array(); foreach ($sections as $section => $fields) { $all = explode('+', $section); foreach ($all as $section) { if (isset($sectionmap[$section])) { $cmd = $sectionmap[$section]; if (!isset($cmds[$cmd])) $cmds[$cmd] = 1; } else if ($section != 'DATE') $errors[] = "Error: unknown section '$section' in custom summary page '$pagename'"; } } $results = array(); foreach ($rigs as $num => $rig) { $parts = explode(':', $rig, 3); if (count($parts) >= 2) { $miner = $parts[0]; $port = $parts[1]; if (count($parts) > 2) $name = $parts[2]; else $name = $rig; foreach ($cmds as $cmd => $one) { $process = api($name, $cmd); if ($error != null) { $errors[] = "Error getting $cmd for $name $warnfont$error$warnoff"; break; } else $results[$cmd][$num] = $process; } } else otherrow(''); } $shownsomething = false; if (count($results) > 0) { list($results, $errors) = joinsections($sections, $results, $errors); $first = true; foreach ($sections as $section => $fields) { if ($section === 'DATE') { if ($shownsomething) otherrow(''); newtable(); showdatetime(); endtable(); // On top of the next table $shownsomething = false; continue; } if ($section === 'RIGS') { if ($shownsomething) otherrow(''); newtable(); showrigs($results['version'], 'Rig', ''); endtable(); $shownsomething = true; continue; } if (isset($results[$sectionmap[$section]])) { $rigresults = processext($ext, $section, $results[$sectionmap[$section]], $fields); $showfields = array(); $showhead = array(); foreach ($fields as $field) foreach ($rigresults as $result) foreach ($result as $sec => $row) { $secname = preg_replace('/\d/', '', $sec); if (secmatch($section, $secname)) { if ($field === '*') { foreach ($row as $f => 
$v) { $showfields[$f] = 1; $map = $section.'.'.$f; if (isset($namemap[$map])) $showhead[$namemap[$map]] = 1; else $showhead[$f] = 1; } } elseif (isset($row[$field])) { $showfields[$field] = 1; $map = $section.'.'.$field; if (isset($namemap[$map])) $showhead[$namemap[$map]] = 1; else $showhead[$field] = 1; } } } if (count($showfields) > 0) { if ($shownsomething) otherrow(''); newtable(); if (count($rigresults) == 1 && isset($rigresults[''])) $ri = array('' => 1) + $showhead; else $ri = array('Rig' => 1) + $showhead; showhead('', $ri, true); $total = array(); $add = array('total' => array()); foreach ($rigresults as $num => $result) $total = customset($showfields, $sum, $section, $num, true, $result, $total); if (count($total) > 0) customset($showfields, $sum, $section, 'Σ', false, $add, $total); $first = false; endtable(); $shownsomething = true; } } } } if (count($errors) > 0) { if (count($results) > 0) otherrow(''); foreach ($errors as $err) otherrow(""); } } # function showcustompage($pagename) { global $customsummarypages; global $placebuttons; if ($placebuttons == 'top' || $placebuttons == 'both') pagebuttons(null, $pagename); if (!isset($customsummarypages[$pagename])) { otherrow(""); return; } $c = count($customsummarypages[$pagename]); if ($c < 2 || $c > 3) { $rw = "'; otherrow($rw); return; } $page = $customsummarypages[$pagename][0]; $namemap = array(); foreach ($page as $name => $fields) { if ($fields === null) $page[$name] = array(); else foreach ($fields as $num => $field) { $pos = strpos($field, '='); if ($pos !== false) { $names = explode('=', $field, 2); if (strlen($names[1]) > 0) $namemap[$name.'.'.$names[0]] = $names[1]; $page[$name][$num] = $names[0]; } } } $ext = null; if (isset($customsummarypages[$pagename][2])) $ext = $customsummarypages[$pagename][2]; $sum = $customsummarypages[$pagename][1]; if ($sum === null) $sum = array(); // convert them to searchable via isset() foreach ($sum as $section => $fields) { $newfields = array(); foreach ($fields as $field) $newfields[$field] = 1; $sum[$section] = $newfields; } if (count($page) <= 1) { otherrow(""); return; } processcustompage($pagename, $page, $sum, $ext, $namemap); if ($placebuttons == 'bot' || $placebuttons == 'both') pagebuttons(null, $pagename); } # function onlylogin() { global $here; htmlhead('', false, null, null, true); ?> No rigs $action"); return; } else { if ($mcast === true && count($rigs) < $mcastexpect) $mcerr = othrow('"); } if ($ignorerefresh == false) { $ref = trim(getparam('ref', true)); if ($ref != null && $ref != '') $autorefresh = intval($ref); } if ($pagesonly !== true) { $rig = trim(getparam('rig', true)); $arg = trim(getparam('arg', true)); $preprocess = null; if ($arg != null and $arg != '') { if ($rig != null and $rig != '' and $rig >= 0 and $rig < count($rigs)) { $parts = explode(':', $rigs[$rig], 3); if (count($parts) >= 2) { $miner = $parts[0]; $port = $parts[1]; if ($readonly !== true) $preprocess = $arg; } } } } if ($allowcustompages === true) { $pg = urlencode(trim(getparam('pg', true))); if ($pagesonly === true) { if ($pg !== null && $pg !== '') { if ($userlist !== null && isset($userlist['def']) && !in_array($pg, $userlist['def'])) $pg = null; } else { if ($userlist !== null && isset($userlist['def'])) foreach ($userlist['def'] as $pglook) if (isset($customsummarypages[$pglook])) { $pg = $pglook; break; } } } if ($pg !== null && $pg !== '') { htmlhead($mcerr, false, null, $pg); showcustompage($pg, $mcerr); return; } } if ($pagesonly === true) { onlylogin(); return; } if (count($rigs) 
== 1) { $parts = explode(':', $rigs[0], 3); if (count($parts) >= 2) { $miner = $parts[0]; $port = $parts[1]; htmlhead($mcerr, true, 0); doOne(0, $preprocess); } else { minhead($mcerr); otherrow(''); } return; } if ($rig != null and $rig != '' and $rig >= 0 and $rig < count($rigs)) { $parts = explode(':', $rigs[$rig], 3); if (count($parts) >= 2) { $miner = $parts[0]; $port = $parts[1]; htmlhead($mcerr, true, 0); doOne($rig, $preprocess); } else { minhead($mcerr); otherrow(''); } return; } htmlhead($mcerr, false, null); if ($placebuttons == 'top' || $placebuttons == 'both') pagebuttons(null, null); if ($preprocess != null) process(array($preprocess => $preprocess), $rig); newtable(); doforeach('version', 'rig summary', array(), array(), true); $sum = array('MHS av', 'Getworks', 'Found Blocks', 'Accepted', 'Rejected', 'Discarded', 'Stale', 'Utility', 'Local Work', 'Total MH'); doforeach('summary', 'summary information', $sum, array(), false); endtable(); otherrow(''); newtable(); doforeach('devs', 'device list', $sum, array(''=>'','ProcID'=>'','ID'=>'','Name'=>''), false); endtable(); otherrow(''); newtable(); doforeach('pools', 'pool list', $sum, array(''=>''), false); endtable(); if ($placebuttons == 'bot' || $placebuttons == 'both') pagebuttons(null, null); } # if ($mcast === true) getrigs(); display(); # ?>
$name$nameDate: '.date($dfmt).'Computer: '.$list['STATUS']['Description'].'When: '.date($dfmt, $list['STATUS']['When']).'Status: '.$stas[$sta].'Message: '.$list['STATUS']['Msg'].'$head$desError getting GPU count: $warnfont$error$warnoffNo GPU count returned: '.$warnfont; $rw .= $gpus['STATUS']['STATUS'].' '.$gpus['STATUS']['Msg']; $rw .= $warnoff.'No GPUs Add a pool: "; foreach ($inps as $text => $name) echo "$text: "; echo " Set pool priorities: Comma list of pool numbers: "; echo "Error getting $des: $warnfont$error$warnoff

$riError on rig $name getting "; $rw .= "$des: $warnfont$error$warnoffFailed to access any rigs successfully'; if ($preverr > 0) $rw .= ' (or rigs had previous errors)'; $rw .= '

Total:
'; if ($userlist === null || isset($_SESSION[$ses])) { if ($prev !== null) echo riginput($prev, 'Prev').' '; echo " "; if ($next !== null) echo riginput($next, 'Next').' '; echo ' '; if (count($rigs) > 1) echo " "; } if ($allowcustompages === true) { if ($userlist === null || isset($_SESSION[$ses])) $list = $customsummarypages; else { if ($userlist !== null && isset($userlist['def'])) $list = array_flip($userlist['def']); else $list = array(); } foreach ($list as $pagename => $data) echo " "; } echo ' '; if ($rig !== null && $readonly === false) { $rg = ''; if (count($rigs) > 1) $rg = " Rig $rig"; echo ""; echo " "; } refreshbuttons(); if (isset($_SESSION[$ses])) echo " "; else if ($userlist !== null) echo " "; echo "
$rigBad "$rigs" array    $errUnknown custom summary page '$pagename'Invalid custom summary page '$pagename' ("; $rw .= count($customsummarypages[$pagename]).')Invalid custom summary page '$pagename' no content
 
 

LOGIN

Username:
Password:
Found '.count($rigs)." rigs but expected at least $mcastexpect
Invalid "$rigs" array
Invalid "$rigs" array
bfgminer-bfgminer-3.10.0/ocl.c000066400000000000000000001076261226556647300161300ustar00rootroot00000000000000/* * Copyright 2011-2013 Con Kolivas * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #ifdef HAVE_OPENCL #include #include #include #include #include #include #include #include #include #include #include #include #define OMIT_OPENCL_API #include "deviceapi.h" #include "findnonce.h" #include "logging.h" #include "ocl.h" /* Platform API */ extern CL_API_ENTRY cl_int CL_API_CALL (*clGetPlatformIDs)(cl_uint /* num_entries */, cl_platform_id * /* platforms */, cl_uint * /* num_platforms */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clGetPlatformInfo)(cl_platform_id /* platform */, cl_platform_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Device APIs */ extern CL_API_ENTRY cl_int CL_API_CALL (*clGetDeviceIDs)(cl_platform_id /* platform */, cl_device_type /* device_type */, cl_uint /* num_entries */, cl_device_id * /* devices */, cl_uint * /* num_devices */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clGetDeviceInfo)(cl_device_id /* device */, cl_device_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Context APIs */ extern CL_API_ENTRY cl_context CL_API_CALL (*clCreateContextFromType)(const cl_context_properties * /* properties */, cl_device_type /* device_type */, void (CL_CALLBACK * /* pfn_notify*/ )(const char *, const void *, size_t, void *), void * /* user_data */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clReleaseContext)(cl_context /* context */) CL_API_SUFFIX__VERSION_1_0; /* Command Queue APIs */ extern CL_API_ENTRY cl_command_queue CL_API_CALL (*clCreateCommandQueue)(cl_context /* context */, cl_device_id /* device */, cl_command_queue_properties /* properties */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clReleaseCommandQueue)(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Memory Object APIs */ extern CL_API_ENTRY cl_mem CL_API_CALL (*clCreateBuffer)(cl_context /* context */, cl_mem_flags /* flags */, size_t /* size */, void * /* host_ptr */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; /* Program Object APIs */ extern CL_API_ENTRY cl_program CL_API_CALL (*clCreateProgramWithSource)(cl_context /* context */, cl_uint /* count */, const char ** /* strings */, const size_t * /* lengths */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_program CL_API_CALL (*clCreateProgramWithBinary)(cl_context /* context */, cl_uint /* num_devices */, const cl_device_id * /* device_list */, const size_t * /* lengths */, const unsigned char ** /* binaries */, cl_int * /* binary_status */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clReleaseProgram)(cl_program /* program */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clBuildProgram)(cl_program /* program */, cl_uint /* num_devices */, const cl_device_id * /* 
device_list */, const char * /* options */, void (CL_CALLBACK * /* pfn_notify */)(cl_program /* program */, void * /* user_data */), void * /* user_data */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clGetProgramInfo)(cl_program /* program */, cl_program_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clGetProgramBuildInfo)(cl_program /* program */, cl_device_id /* device */, cl_program_build_info /* param_name */, size_t /* param_value_size */, void * /* param_value */, size_t * /* param_value_size_ret */) CL_API_SUFFIX__VERSION_1_0; /* Kernel Object APIs */ extern CL_API_ENTRY cl_kernel CL_API_CALL (*clCreateKernel)(cl_program /* program */, const char * /* kernel_name */, cl_int * /* errcode_ret */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clReleaseKernel)(cl_kernel /* kernel */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clSetKernelArg)(cl_kernel /* kernel */, cl_uint /* arg_index */, size_t /* arg_size */, const void * /* arg_value */) CL_API_SUFFIX__VERSION_1_0; /* Flush and Finish APIs */ extern CL_API_ENTRY cl_int CL_API_CALL (*clFinish)(cl_command_queue /* command_queue */) CL_API_SUFFIX__VERSION_1_0; /* Enqueued Commands APIs */ extern CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueReadBuffer)(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_read */, size_t /* offset */, size_t /* size */, void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueWriteBuffer)(cl_command_queue /* command_queue */, cl_mem /* buffer */, cl_bool /* blocking_write */, size_t /* offset */, size_t /* size */, const void * /* ptr */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; extern CL_API_ENTRY cl_int CL_API_CALL (*clEnqueueNDRangeKernel)(cl_command_queue /* command_queue */, cl_kernel /* kernel */, cl_uint /* work_dim */, const size_t * /* global_work_offset */, const size_t * /* global_work_size */, const size_t * /* local_work_size */, cl_uint /* num_events_in_wait_list */, const cl_event * /* event_wait_list */, cl_event * /* event */) CL_API_SUFFIX__VERSION_1_0; int opt_platform_id = -1; #ifdef __APPLE__ // Apple OpenCL doesn't like using binaries this way bool opt_opencl_binaries; #else bool opt_opencl_binaries = true; #endif char *file_contents(const char *filename, int *length) { char *fullpath = alloca(PATH_MAX); void *buffer; FILE *f; /* Try in the optional kernel path or installed prefix first */ f = open_bitstream("opencl", filename); if (!f) { /* Then try from the path BFGMiner was called */ strcpy(fullpath, cgminer_path); strcat(fullpath, filename); f = fopen(fullpath, "rb"); } /* Finally try opening it directly */ if (!f) f = fopen(filename, "rb"); if (!f) { applog(LOG_ERR, "Unable to open %s or %s for reading", filename, fullpath); return NULL; } fseek(f, 0, SEEK_END); *length = ftell(f); fseek(f, 0, SEEK_SET); buffer = malloc(*length+1); *length = fread(buffer, 1, *length, f); fclose(f); ((char*)buffer)[*length] = '\0'; return (char*)buffer; } extern int opt_g_threads; int clDevicesNum(void) { cl_int status; char pbuff[256]; cl_uint numDevices; cl_uint numPlatforms; int most_devices = -1; cl_platform_id *platforms; 
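	/* The code below walks every OpenCL platform the runtime reports, logs
	 * its vendor/name/version, and counts the GPU devices each one exposes.
	 * Unless a specific platform index was already requested
	 * (opt_platform_id >= 0), the platform exposing the most GPUs is
	 * selected; when that platform is Mesa and opt_g_threads was left at
	 * its default of -1, the GPU thread count defaults to 1. */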
cl_platform_id platform = NULL; unsigned int i, mdplatform = 0; bool mdmesa = false; status = clGetPlatformIDs(0, NULL, &numPlatforms); /* If this fails, assume no GPUs. */ if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: clGetPlatformsIDs failed (no OpenCL SDK installed?)", status); return -1; } if (numPlatforms == 0) { applog(LOG_ERR, "clGetPlatformsIDs returned no platforms (no OpenCL SDK installed?)"); return -1; } platforms = (cl_platform_id *)alloca(numPlatforms*sizeof(cl_platform_id)); status = clGetPlatformIDs(numPlatforms, platforms, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Ids. (clGetPlatformsIDs)", status); return -1; } for (i = 0; i < numPlatforms; i++) { if (opt_platform_id >= 0 && (int)i != opt_platform_id) continue; status = clGetPlatformInfo( platforms[i], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Info. (clGetPlatformInfo)", status); return -1; } platform = platforms[i]; applog(LOG_INFO, "CL Platform %d vendor: %s", i, pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(pbuff), pbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform %d name: %s", i, pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_VERSION, sizeof(pbuff), pbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform %d version: %s", i, pbuff); status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices); if (status != CL_SUCCESS) { applog(LOG_INFO, "Error %d: Getting Device IDs (num)", status); continue; } applog(LOG_INFO, "Platform %d devices: %d", i, numDevices); if ((int)numDevices > most_devices) { most_devices = numDevices; mdplatform = i; mdmesa = strstr(pbuff, "MESA"); } if (numDevices) { unsigned int j; cl_device_id *devices = (cl_device_id *)malloc(numDevices*sizeof(cl_device_id)); clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, NULL); for (j = 0; j < numDevices; j++) { clGetDeviceInfo(devices[j], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL); applog(LOG_INFO, "\t%i\t%s", j, pbuff); } free(devices); } } if (opt_platform_id < 0) opt_platform_id = mdplatform;; if (mdmesa && opt_g_threads == -1) opt_g_threads = 1; return most_devices; } static int advance(char **area, unsigned *remaining, const char *marker) { char *find = memmem(*area, *remaining, marker, strlen(marker)); if (!find) { applog(LOG_DEBUG, "Marker \"%s\" not found", marker); return 0; } *remaining -= find - *area; *area = find; return 1; } #define OP3_INST_BFE_UINT 4ULL #define OP3_INST_BFE_INT 5ULL #define OP3_INST_BFI_INT 6ULL #define OP3_INST_BIT_ALIGN_INT 12ULL #define OP3_INST_BYTE_ALIGN_INT 13ULL void patch_opcodes(char *w, unsigned remaining) { uint64_t *opcode = (uint64_t *)w; int patched = 0; int count_bfe_int = 0; int count_bfe_uint = 0; int count_byte_align = 0; while (42) { int clamp = (*opcode >> (32 + 31)) & 0x1; int dest_rel = (*opcode >> (32 + 28)) & 0x1; int alu_inst = (*opcode >> (32 + 13)) & 0x1f; int s2_neg = (*opcode >> (32 + 12)) & 0x1; int s2_rel = (*opcode >> (32 + 9)) & 0x1; int pred_sel = (*opcode >> 29) & 0x3; if (!clamp && !dest_rel && !s2_neg && !s2_rel && !pred_sel) { if (alu_inst == OP3_INST_BFE_INT) { count_bfe_int++; } else if (alu_inst == OP3_INST_BFE_UINT) { count_bfe_uint++; } else if (alu_inst == OP3_INST_BYTE_ALIGN_INT) { count_byte_align++; // patch this instruction to BFI_INT *opcode &= 0xfffc1fffffffffffULL; *opcode |= OP3_INST_BFI_INT << (32 + 13); patched++; } } if (remaining <= 8) break; opcode++; 
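		/* Advance to the next 64-bit opcode word; the matching 8-byte
		 * decrement of 'remaining' follows immediately, and the loop
		 * breaks just above once 8 bytes or fewer are left to scan. */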
remaining -= 8; } applog(LOG_DEBUG, "Potential OP3 instructions identified: " "%i BFE_INT, %i BFE_UINT, %i BYTE_ALIGN", count_bfe_int, count_bfe_uint, count_byte_align); applog(LOG_DEBUG, "Patched a total of %i BFI_INT instructions", patched); } _clState *initCl(unsigned int gpu, char *name, size_t nameSize) { _clState *clState = calloc(1, sizeof(_clState)); bool patchbfi = false, prog_built = false; bool usebinary = opt_opencl_binaries, ismesa = false; struct cgpu_info *cgpu = &gpus[gpu]; cl_platform_id platform = NULL; char pbuff[256], vbuff[255]; char *s; cl_platform_id* platforms; cl_uint preferred_vwidth; cl_device_id *devices; cl_uint numPlatforms; cl_uint numDevices; cl_int status; status = clGetPlatformIDs(0, NULL, &numPlatforms); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platforms. (clGetPlatformsIDs)", status); return NULL; } platforms = (cl_platform_id *)alloca(numPlatforms*sizeof(cl_platform_id)); status = clGetPlatformIDs(numPlatforms, platforms, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Ids. (clGetPlatformsIDs)", status); return NULL; } if (opt_platform_id >= (int)numPlatforms) { applog(LOG_ERR, "Specified platform that does not exist"); return NULL; } status = clGetPlatformInfo(platforms[opt_platform_id], CL_PLATFORM_VENDOR, sizeof(pbuff), pbuff, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Platform Info. (clGetPlatformInfo)", status); return NULL; } platform = platforms[opt_platform_id]; if (platform == NULL) { perror("NULL platform found!\n"); return NULL; } applog(LOG_INFO, "CL Platform vendor: %s", pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_NAME, sizeof(pbuff), pbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform name: %s", pbuff); status = clGetPlatformInfo(platform, CL_PLATFORM_VERSION, sizeof(vbuff), vbuff, NULL); if (status == CL_SUCCESS) applog(LOG_INFO, "CL Platform version: %s", vbuff); status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &numDevices); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Device IDs (num)", status); return NULL; } if (numDevices > 0 ) { devices = (cl_device_id *)malloc(numDevices*sizeof(cl_device_id)); /* Now, get the device list data */ status = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, numDevices, devices, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Device IDs (list)", status); return NULL; } applog(LOG_INFO, "List of devices:"); unsigned int i; for (i = 0; i < numDevices; i++) { status = clGetDeviceInfo(devices[i], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Device Info", status); return NULL; } applog(LOG_INFO, "\t%i\t%s", i, pbuff); } if (gpu < numDevices) { status = clGetDeviceInfo(devices[gpu], CL_DEVICE_NAME, sizeof(pbuff), pbuff, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Getting Device Info", status); return NULL; } applog(LOG_INFO, "Selected %i: %s", gpu, pbuff); strncpy(name, pbuff, nameSize); } else { applog(LOG_ERR, "Invalid GPU %i", gpu); return NULL; } } else return NULL; cl_context_properties cps[3] = { CL_CONTEXT_PLATFORM, (cl_context_properties)platform, 0 }; clState->context = clCreateContextFromType(cps, CL_DEVICE_TYPE_GPU, NULL, NULL, &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Creating Context. 
(clCreateContextFromType)", status); return NULL; } ///////////////////////////////////////////////////////////////// // Create an OpenCL command queue ///////////////////////////////////////////////////////////////// clState->commandQueue = clCreateCommandQueue(clState->context, devices[gpu], CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, &status); if (status != CL_SUCCESS) /* Try again without OOE enable */ clState->commandQueue = clCreateCommandQueue(clState->context, devices[gpu], 0 , &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Creating Command Queue. (clCreateCommandQueue)", status); return NULL; } /* Check for BFI INT support. Hopefully people don't mix devices with * and without it! */ char * extensions = malloc(1024); const char * camo = "cl_amd_media_ops"; char *find; status = clGetDeviceInfo(devices[gpu], CL_DEVICE_EXTENSIONS, 1024, (void *)extensions, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_EXTENSIONS", status); return NULL; } find = strstr(extensions, camo); if (find) clState->hasBitAlign = true; /* Check for OpenCL >= 1.0 support, needed for global offset parameter usage. */ char * devoclver = malloc(1024); const char * ocl10 = "OpenCL 1.0"; status = clGetDeviceInfo(devices[gpu], CL_DEVICE_VERSION, 1024, (void *)devoclver, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_VERSION", status); return NULL; } find = strstr(devoclver, ocl10); if (!find) clState->hasOpenCL11plus = true; status = clGetDeviceInfo(devices[gpu], CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT, sizeof(cl_uint), (void *)&preferred_vwidth, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_PREFERRED_VECTOR_WIDTH_INT", status); return NULL; } applog(LOG_DEBUG, "Preferred vector width reported %d", preferred_vwidth); status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(size_t), (void *)&clState->max_work_size, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_WORK_GROUP_SIZE", status); return NULL; } applog(LOG_DEBUG, "Max work group size reported %"PRId64, (int64_t)clState->max_work_size); status = clGetDeviceInfo(devices[gpu], CL_DEVICE_MAX_MEM_ALLOC_SIZE , sizeof(cl_ulong), (void *)&cgpu->max_alloc, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Failed to clGetDeviceInfo when trying to get CL_DEVICE_MAX_MEM_ALLOC_SIZE", status); return NULL; } applog(LOG_DEBUG, "Max mem alloc size is %lu", (unsigned long)cgpu->max_alloc); if (strstr(vbuff, "MESA")) { applog(LOG_DEBUG, "Mesa OpenCL platform detected, disabling OpenCL kernel binaries and bitalign"); clState->hasBitAlign = false; usebinary = false; ismesa = true; } /* Create binary filename based on parameters passed to opencl * compiler to ensure we only load a binary that matches what would * have otherwise created. 
The filename is: * kernelname + name +/- g(offset) + v + vectors + w + work_size + l + sizeof(long) + p + platform version + .bin * For scrypt the filename is: * kernelname + name + g + lg + lookup_gap + tc + thread_concurrency + w + work_size + l + sizeof(long) + p + platform version + .bin */ char binaryfilename[255]; char filename[255]; char numbuf[32]; if (cgpu->kernel == KL_NONE) { if (opt_scrypt) { applog(LOG_INFO, "Selecting scrypt kernel"); clState->chosen_kernel = KL_SCRYPT; } else if (ismesa) { applog(LOG_INFO, "Selecting phatk kernel for Mesa"); clState->chosen_kernel = KL_PHATK; } else if (!strstr(name, "Tahiti") && /* Detect all 2.6 SDKs not with Tahiti and use diablo kernel */ (strstr(vbuff, "844.4") || // Linux 64 bit ATI 2.6 SDK strstr(vbuff, "851.4") || // Windows 64 bit "" strstr(vbuff, "831.4") || strstr(vbuff, "898.1") || // 12.2 driver SDK strstr(vbuff, "923.1") || // 12.4 strstr(vbuff, "938.2") || // SDK 2.7 strstr(vbuff, "1113.2"))) {// SDK 2.8 applog(LOG_INFO, "Selecting diablo kernel"); clState->chosen_kernel = KL_DIABLO; /* Detect all 7970s, older ATI and NVIDIA and use poclbm */ } else if (strstr(name, "Tahiti") || !clState->hasBitAlign) { applog(LOG_INFO, "Selecting poclbm kernel"); clState->chosen_kernel = KL_POCLBM; /* Use phatk for the rest R5xxx R6xxx */ } else { applog(LOG_INFO, "Selecting phatk kernel"); clState->chosen_kernel = KL_PHATK; } cgpu->kernel = clState->chosen_kernel; } else { clState->chosen_kernel = cgpu->kernel; if (clState->chosen_kernel == KL_PHATK && (strstr(vbuff, "844.4") || strstr(vbuff, "851.4") || strstr(vbuff, "831.4") || strstr(vbuff, "898.1") || strstr(vbuff, "923.1") || strstr(vbuff, "938.2") || strstr(vbuff, "1113.2"))) { applog(LOG_WARNING, "WARNING: You have selected the phatk kernel."); applog(LOG_WARNING, "You are running SDK 2.6+ which performs poorly with this kernel."); applog(LOG_WARNING, "Downgrade your SDK and delete any .bin files before starting again."); applog(LOG_WARNING, "Or allow BFGMiner to automatically choose a more suitable kernel."); } } /* For some reason 2 vectors is still better even if the card says * otherwise, and many cards lie about their max so use 256 as max * unless explicitly set on the command line. 
Tahiti prefers 1 */ if (strstr(name, "Tahiti")) preferred_vwidth = 1; else if (preferred_vwidth > 2) preferred_vwidth = 2; switch (clState->chosen_kernel) { case KL_POCLBM: strcpy(filename, POCLBM_KERNNAME".cl"); strcpy(binaryfilename, POCLBM_KERNNAME); break; case KL_PHATK: strcpy(filename, PHATK_KERNNAME".cl"); strcpy(binaryfilename, PHATK_KERNNAME); break; case KL_DIAKGCN: strcpy(filename, DIAKGCN_KERNNAME".cl"); strcpy(binaryfilename, DIAKGCN_KERNNAME); break; case KL_SCRYPT: strcpy(filename, SCRYPT_KERNNAME".cl"); strcpy(binaryfilename, SCRYPT_KERNNAME); /* Scrypt only supports vector 1 */ cgpu->vwidth = 1; break; case KL_NONE: /* Shouldn't happen */ case KL_DIABLO: strcpy(filename, DIABLO_KERNNAME".cl"); strcpy(binaryfilename, DIABLO_KERNNAME); break; } if (cgpu->vwidth) clState->vwidth = cgpu->vwidth; else { clState->vwidth = preferred_vwidth; cgpu->vwidth = preferred_vwidth; } if (((clState->chosen_kernel == KL_POCLBM || clState->chosen_kernel == KL_DIABLO || clState->chosen_kernel == KL_DIAKGCN) && clState->vwidth == 1 && clState->hasOpenCL11plus) || opt_scrypt) clState->goffset = true; if (cgpu->work_size && cgpu->work_size <= clState->max_work_size) clState->wsize = cgpu->work_size; else if (opt_scrypt) clState->wsize = 256; else if (strstr(name, "Tahiti")) clState->wsize = 64; else clState->wsize = (clState->max_work_size <= 256 ? clState->max_work_size : 256) / clState->vwidth; cgpu->work_size = clState->wsize; #ifdef USE_SCRYPT if (opt_scrypt) { if (!cgpu->opt_lg) { applog(LOG_DEBUG, "GPU %d: selecting lookup gap of 2", gpu); cgpu->lookup_gap = 2; } else cgpu->lookup_gap = cgpu->opt_lg; if (!cgpu->opt_tc) { unsigned int sixtyfours; sixtyfours = cgpu->max_alloc / 131072 / 64 - 1; cgpu->thread_concurrency = sixtyfours * 64; if (cgpu->shaders && cgpu->thread_concurrency > cgpu->shaders) { cgpu->thread_concurrency -= cgpu->thread_concurrency % cgpu->shaders; if (cgpu->thread_concurrency > cgpu->shaders * 5) cgpu->thread_concurrency = cgpu->shaders * 5; } applog(LOG_DEBUG, "GPU %u: selecting thread concurrency of %lu", gpu, (unsigned long)cgpu->thread_concurrency); } else cgpu->thread_concurrency = cgpu->opt_tc; } #endif FILE *binaryfile; size_t *binary_sizes; char **binaries; int pl; char *source = file_contents(filename, &pl); size_t sourceSize[] = {(size_t)pl}; cl_uint slot, cpnd; slot = cpnd = 0; if (!source) return NULL; binary_sizes = calloc(sizeof(size_t) * MAX_GPUDEVICES * 4, 1); if (unlikely(!binary_sizes)) { applog(LOG_ERR, "Unable to calloc binary_sizes"); return NULL; } binaries = calloc(sizeof(char *) * MAX_GPUDEVICES * 4, 1); if (unlikely(!binaries)) { applog(LOG_ERR, "Unable to calloc binaries"); return NULL; } strcat(binaryfilename, name); if (clState->goffset) strcat(binaryfilename, "g"); if (opt_scrypt) { #ifdef USE_SCRYPT sprintf(numbuf, "lg%utc%u", cgpu->lookup_gap, (unsigned int)cgpu->thread_concurrency); strcat(binaryfilename, numbuf); #endif } else { sprintf(numbuf, "v%d", clState->vwidth); strcat(binaryfilename, numbuf); } sprintf(numbuf, "w%d", (int)clState->wsize); strcat(binaryfilename, numbuf); sprintf(numbuf, "l%d", (int)sizeof(long)); strcat(binaryfilename, numbuf); strcat(binaryfilename, "p"); strcat(binaryfilename, vbuff); sanestr(binaryfilename, binaryfilename); applog(LOG_DEBUG, "OCL%2u: Configured OpenCL kernel name: %s", gpu, binaryfilename); strcat(binaryfilename, ".bin"); if (!usebinary) goto build; binaryfile = fopen(binaryfilename, "rb"); if (!binaryfile) { applog(LOG_DEBUG, "No binary found, generating from source"); } else { struct stat 
binary_stat; if (unlikely(stat(binaryfilename, &binary_stat))) { applog(LOG_DEBUG, "Unable to stat binary, generating from source"); fclose(binaryfile); goto build; } if (!binary_stat.st_size) goto build; binary_sizes[slot] = binary_stat.st_size; binaries[slot] = (char *)calloc(binary_sizes[slot], 1); if (unlikely(!binaries[slot])) { applog(LOG_ERR, "Unable to calloc binaries"); fclose(binaryfile); return NULL; } if (fread(binaries[slot], 1, binary_sizes[slot], binaryfile) != binary_sizes[slot]) { applog(LOG_ERR, "Unable to fread binaries"); fclose(binaryfile); free(binaries[slot]); goto build; } clState->program = clCreateProgramWithBinary(clState->context, 1, &devices[gpu], &binary_sizes[slot], (const unsigned char **)binaries, &status, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithBinary)", status); fclose(binaryfile); free(binaries[slot]); goto build; } fclose(binaryfile); applog(LOG_DEBUG, "Loaded binary image %s", binaryfilename); goto built; } ///////////////////////////////////////////////////////////////// // Load CL file, build CL program object, create CL kernel object ///////////////////////////////////////////////////////////////// build: clState->program = clCreateProgramWithSource(clState->context, 1, (const char **)&source, sourceSize, &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithSource)", status); return NULL; } /* create a cl program executable for all the devices specified */ char *CompilerOptions = calloc(1, 256); #ifdef USE_SCRYPT if (opt_scrypt) sprintf(CompilerOptions, "-D LOOKUP_GAP=%d -D CONCURRENT_THREADS=%d -D WORKSIZE=%d", cgpu->lookup_gap, (unsigned int)cgpu->thread_concurrency, (int)clState->wsize); else #endif { sprintf(CompilerOptions, "-D WORKSIZE=%d -D VECTORS%d -D WORKVEC=%d", (int)clState->wsize, clState->vwidth, (int)clState->wsize * clState->vwidth); } applog(LOG_DEBUG, "Setting worksize to %"PRId64, (int64_t)clState->wsize); if (clState->vwidth > 1) applog(LOG_DEBUG, "Patched source to suit %d vectors", clState->vwidth); if (clState->hasBitAlign) { strcat(CompilerOptions, " -D BITALIGN"); applog(LOG_DEBUG, "cl_amd_media_ops found, setting BITALIGN"); if (strstr(name, "Cedar") || strstr(name, "Redwood") || strstr(name, "Juniper") || strstr(name, "Cypress" ) || strstr(name, "Hemlock" ) || strstr(name, "Caicos" ) || strstr(name, "Turks" ) || strstr(name, "Barts" ) || strstr(name, "Cayman" ) || strstr(name, "Antilles" ) || strstr(name, "Wrestler" ) || strstr(name, "Zacate" ) || strstr(name, "WinterPark" )) { // BFI_INT patching only works with AMD-APP up to 1084 if (strstr(vbuff, "ATI-Stream")) patchbfi = true; else if ((s = strstr(vbuff, "AMD-APP")) && (s = strchr(s, '(')) && atoi(&s[1]) < 1085) patchbfi = true; } } else applog(LOG_DEBUG, "cl_amd_media_ops not found, will not set BITALIGN"); if (patchbfi) { strcat(CompilerOptions, " -D BFI_INT"); applog(LOG_DEBUG, "BFI_INT patch requiring device found, patched source with BFI_INT"); } else applog(LOG_DEBUG, "BFI_INT patch requiring device not found, will not BFI_INT patch"); if (clState->goffset) strcat(CompilerOptions, " -D GOFFSET"); if (!clState->hasOpenCL11plus) strcat(CompilerOptions, " -D OCL1"); applog(LOG_DEBUG, "CompilerOptions: %s", CompilerOptions); status = clBuildProgram(clState->program, 1, &devices[gpu], CompilerOptions , NULL, NULL); free(CompilerOptions); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Building Program (clBuildProgram)", 
status); size_t logSize; status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize); char *log = malloc(logSize); status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, logSize, log, NULL); applog(LOG_ERR, "%s", log); return NULL; } prog_built = true; if (!usebinary) goto built; status = clGetProgramInfo(clState->program, CL_PROGRAM_NUM_DEVICES, sizeof(cl_uint), &cpnd, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error %d: Getting program info CL_PROGRAM_NUM_DEVICES. (clGetProgramInfo)", status); return NULL; } status = clGetProgramInfo(clState->program, CL_PROGRAM_BINARY_SIZES, sizeof(size_t)*cpnd, binary_sizes, NULL); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error %d: Getting program info CL_PROGRAM_BINARY_SIZES. (clGetProgramInfo)", status); return NULL; } /* The actual compiled binary ends up in a RANDOM slot! Grr, so we have * to iterate over all the binary slots and find where the real program * is. What the heck is this!? */ for (slot = 0; slot < cpnd; slot++) if (binary_sizes[slot]) break; /* copy over all of the generated binaries. */ applog(LOG_DEBUG, "Binary size for gpu %u found in binary slot %u: %"PRId64, gpu, (unsigned)slot, (int64_t)binary_sizes[slot]); if (!binary_sizes[slot]) { applog(LOG_ERR, "OpenCL compiler generated a zero sized binary, FAIL!"); return NULL; } binaries[slot] = calloc(sizeof(char) * binary_sizes[slot], 1); status = clGetProgramInfo(clState->program, CL_PROGRAM_BINARIES, sizeof(char *) * cpnd, binaries, NULL ); if (unlikely(status != CL_SUCCESS)) { applog(LOG_ERR, "Error %d: Getting program info. CL_PROGRAM_BINARIES (clGetProgramInfo)", status); return NULL; } /* Patch the kernel if the hardware supports BFI_INT but it needs to * be hacked in */ if (patchbfi) { unsigned remaining = binary_sizes[slot]; char *w = binaries[slot]; unsigned int start, length; /* Find 2nd incidence of .text, and copy the program's * position and length at a fixed offset from that. Then go * back and find the 2nd incidence of \x7ELF (rewind by one * from ELF) and then patch the opcocdes */ if (!advance(&w, &remaining, ".text")) goto build; w++; remaining--; if (!advance(&w, &remaining, ".text")) { /* 32 bit builds only one ELF */ w--; remaining++; } memcpy(&start, w + 285, 4); memcpy(&length, w + 289, 4); w = binaries[slot]; remaining = binary_sizes[slot]; if (!advance(&w, &remaining, "ELF")) goto build; w++; remaining--; if (!advance(&w, &remaining, "ELF")) { /* 32 bit builds only one ELF */ w--; remaining++; } w--; remaining++; w += start; remaining -= start; applog(LOG_DEBUG, "At %p (%u rem. bytes), to begin patching", w, remaining); patch_opcodes(w, length); status = clReleaseProgram(clState->program); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Releasing program. 
(clReleaseProgram)", status); return NULL; } clState->program = clCreateProgramWithBinary(clState->context, 1, &devices[gpu], &binary_sizes[slot], (const unsigned char **)&binaries[slot], &status, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Loading Binary into cl_program (clCreateProgramWithBinary)", status); return NULL; } /* Program needs to be rebuilt */ prog_built = false; } free(source); /* Save the binary to be loaded next time */ binaryfile = fopen(binaryfilename, "wb"); if (!binaryfile) { /* Not a fatal problem, just means we build it again next time */ applog(LOG_DEBUG, "Unable to create file %s", binaryfilename); } else { if (unlikely(fwrite(binaries[slot], 1, binary_sizes[slot], binaryfile) != binary_sizes[slot])) { applog(LOG_ERR, "Unable to fwrite to binaryfile"); return NULL; } fclose(binaryfile); } built: if (binaries[slot]) free(binaries[slot]); free(binaries); free(binary_sizes); applog(LOG_INFO, "Initialising kernel %s with%s bitalign, %"PRId64" vectors and worksize %"PRIu64, filename, clState->hasBitAlign ? "" : "out", (int64_t)clState->vwidth, (uint64_t)clState->wsize); if (!prog_built) { /* create a cl program executable for all the devices specified */ status = clBuildProgram(clState->program, 1, &devices[gpu], NULL, NULL, NULL); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Building Program (clBuildProgram)", status); size_t logSize; status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, 0, NULL, &logSize); char *log = malloc(logSize); status = clGetProgramBuildInfo(clState->program, devices[gpu], CL_PROGRAM_BUILD_LOG, logSize, log, NULL); applog(LOG_ERR, "%s", log); return NULL; } } /* get a kernel object handle for a kernel with the given name */ clState->kernel = clCreateKernel(clState->program, "search", &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: Creating Kernel from program. (clCreateKernel)", status); return NULL; } #ifdef USE_SCRYPT if (opt_scrypt) { size_t ipt = (1024 / cgpu->lookup_gap + (1024 % cgpu->lookup_gap > 0)); size_t bufsize = 128 * ipt * cgpu->thread_concurrency; /* Use the max alloc value which has been rounded to a power of * 2 greater >= required amount earlier */ if (bufsize > cgpu->max_alloc) { applog(LOG_WARNING, "Maximum buffer memory device %d supports says %lu", gpu, (unsigned long)cgpu->max_alloc); applog(LOG_WARNING, "Your scrypt settings come to %lu", (unsigned long)bufsize); } applog(LOG_DEBUG, "Creating scrypt buffer sized %lu", (unsigned long)bufsize); clState->padbufsize = bufsize; /* This buffer is weird and might work to some degree even if * the create buffer call has apparently failed, so check if we * get anything back before we call it a failure. 
*/ clState->padbuffer8 = NULL; clState->padbuffer8 = clCreateBuffer(clState->context, CL_MEM_READ_WRITE, bufsize, NULL, &status); if (status != CL_SUCCESS && !clState->padbuffer8) { applog(LOG_ERR, "Error %d: clCreateBuffer (padbuffer8), decrease TC or increase LG", status); return NULL; } clState->CLbuffer0 = clCreateBuffer(clState->context, CL_MEM_READ_ONLY, 128, NULL, &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: clCreateBuffer (CLbuffer0)", status); return NULL; } clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, SCRYPT_BUFFERSIZE, NULL, &status); } else #endif clState->outputBuffer = clCreateBuffer(clState->context, CL_MEM_WRITE_ONLY, BUFFERSIZE, NULL, &status); if (status != CL_SUCCESS) { applog(LOG_ERR, "Error %d: clCreateBuffer (outputBuffer)", status); return NULL; } return clState; } #endif /* HAVE_OPENCL */ bfgminer-bfgminer-3.10.0/ocl.h000066400000000000000000000013451226556647300161240ustar00rootroot00000000000000#ifndef __OCL_H__ #define __OCL_H__ #include "config.h" #include #ifdef HAVE_OPENCL #include "CL/cl.h" #include "miner.h" typedef struct { cl_context context; cl_kernel kernel; cl_command_queue commandQueue; cl_program program; cl_mem outputBuffer; #ifdef USE_SCRYPT cl_mem CLbuffer0; cl_mem padbuffer8; size_t padbufsize; void * cldata; #endif bool hasBitAlign; bool hasOpenCL11plus; bool goffset; cl_uint vwidth; size_t max_work_size; size_t wsize; enum cl_kernels chosen_kernel; } _clState; extern char *file_contents(const char *filename, int *length); extern int clDevicesNum(void); extern _clState *initCl(unsigned int gpu, char *name, size_t nameSize); #endif /* HAVE_OPENCL */ #endif /* __OCL_H__ */ bfgminer-bfgminer-3.10.0/openwrt/000077500000000000000000000000001226556647300166715ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/openwrt/.gitignore000066400000000000000000000000511226556647300206550ustar00rootroot00000000000000!bfgminer !bfgminer/Makefile openwrt-src bfgminer-bfgminer-3.10.0/openwrt/bfgminer/000077500000000000000000000000001226556647300204625ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/openwrt/bfgminer/Makefile000066400000000000000000000072461226556647300221330ustar00rootroot00000000000000# # Copyright 2013 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. See COPYING for more details. 
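#
# Summary of the package logic below (taken from this Makefile itself): each
# PACKAGE_bfgminer_* menuconfig symbol (curses TUI, libevent stratum server,
# libmicrohttpd getwork server, libusb for X6500/ZTEX) both adds the matching
# library to DEPENDS and is translated into the corresponding --with/--without
# or --disable argument passed to ./configure, so deselecting an option drops
# the feature and its dependency together.  The uthash headers are downloaded
# separately and added to TARGET_CFLAGS as an include path during prepare.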
# include $(TOPDIR)/rules.mk PKG_NAME:=bfgminer PKG_TITLE:=BFGMiner PKG_VERSION:=3.10.0 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tbz2 PKG_SOURCE_URL:=http://luke.dashjr.org/programs/bitcoin/files/$(PKG_NAME)/$(PKG_VERSION)/ PKG_INSTALL:=1 include $(INCLUDE_DIR)/package.mk define Download/uthash FILE:=v1.9.8 URL:=https://codeload.github.com/troydhanson/uthash/tar.gz/ endef $(eval $(call Download,uthash)) define Package/$(PKG_NAME)/Default MAINTAINER:="Luke Dashjr" SECTION:=utils CATEGORY:=Utilities URL:=http://luke.dashjr.org/programs/$(PKG_NAME) DEPENDS:=+$(PKG_NAME) endef define Package/$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) TITLE:=$(PKG_TITLE) (Bitcoin miner) DEPENDS:=+libc +libcurl +libpthread +jansson +PACKAGE_$(PKG_NAME)_libevent:libevent2 +PACKAGE_$(PKG_NAME)_curses:libncurses +PACKAGE_$(PKG_NAME)_libmicrohttpd:libmicrohttpd +PACKAGE_$(PKG_NAME)_libusb:libusb-1.0 endef define Package/$(PKG_NAME)/description Modular Bitcoin ASIC/FPGA/GPU/CPU miner in C endef define Package/$(PKG_NAME)/config config PACKAGE_$(PKG_NAME)_curses bool "Build with curses TUI support" depends on PACKAGE_$(PKG_NAME) default y config PACKAGE_$(PKG_NAME)_libevent bool "Build with stratum server support" depends on PACKAGE_$(PKG_NAME) default y config PACKAGE_$(PKG_NAME)_libmicrohttpd bool "Build with getwork server support (Block Erupter blades)" depends on PACKAGE_$(PKG_NAME) default y config PACKAGE_$(PKG_NAME)_libusb bool "Build with libusb support (X6500 & ZTEX)" depends on PACKAGE_$(PKG_NAME) default y endef ifndef CONFIG_PACKAGE_$(PKG_NAME)_curses CONFIGURE_ARGS += --without-curses else CONFIGURE_ARGS += --with-curses=ncurses endif ifndef CONFIG_PACKAGE_$(PKG_NAME)_libevent CONFIGURE_ARGS += --without-libevent else CONFIGURE_ARGS += --with-libevent endif ifndef CONFIG_PACKAGE_$(PKG_NAME)_libmicrohttpd CONFIGURE_ARGS += --without-libmicrohttpd else CONFIGURE_ARGS += --with-libmicrohttpd endif ifndef CONFIG_PACKAGE_$(PKG_NAME)_libusb CONFIGURE_ARGS += --disable-x6500 --disable-ztex endif TARGET_CFLAGS += -std=gnu99 TARGET_CFLAGS += -Iuthash-1.9.8/src CONFIGURE_ARGS += --without-libudev CONFIGURE_ARGS += --without-sensors define Build/Prepare $(call Build/Prepare/Default) gzip -dc $(DL_DIR)/v1.9.8 | $(HOST_TAR) -C $(PKG_BUILD_DIR) $(TAR_OPTIONS) endef define Build/Configure # Need to remake configure etc to pick up on cross-compiler libtool ( cd $(PKG_BUILD_DIR); NOSUBMODULES=1 ./autogen.sh; ) $(call Build/Configure/Default) endef define Package/$(PKG_NAME)/install $(INSTALL_DIR) $(1)/usr/bin $(1)/usr/lib $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/$(PKG_NAME) $(1)/usr/bin $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/$(PKG_NAME)-rpc $(1)/usr/bin $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/lib/libblkmaker*.so* $(1)/usr/lib endef ALL_$(PKG_NAME)_PACKAGES += $(PKG_NAME) #### BitForce firmware flash #### define Package/bitforce-firmware-flash $(call Package/$(PKG_NAME)/Default) TITLE:=BitForce firmware flash tool endef define Package/bitforce-firmware-flash/description BitForce firmware flash tool endef define Package/bitforce-firmware-flash/install $(INSTALL_DIR) $(1)/usr/bin $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/bitforce-firmware-flash $(1)/usr/bin endef ALL_$(PKG_NAME)_PACKAGES += bitforce-firmware-flash $(foreach bitstream,$(ALL_$(PKG_NAME)_PACKAGES),$(eval $(call BuildPackage,$(bitstream)))) bfgminer-bfgminer-3.10.0/openwrt/multibuild.sh000077500000000000000000000035041226556647300214040ustar00rootroot00000000000000#!/bin/bash # Copyright 2013 Luke Dashjr # # This program is free software; 
you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. See COPYING for more details. set -e set -x reporoot="$1" # .../files/bfgminer/BFGMINER_VERSION/openwrt/OPENWRT_VERSION test -n "$reporoot" reporoot="$(realpath "$reporoot")" test -n "$reporoot" cd "openwrt-src/" test -d "$reporoot" vcfgdir='vanilla_configs' vcfglist="$( ls -d "$vcfgdir"/*.config* | perl -ple 's[.*/][]' | sort -n )" BITSTREAM_PKG_PATH='../../../../bitstreams/openwrt/' # Relative to reporoot BITSTREAMS=( fpgaminer_402-1 ztex-ufm1_15b1_121126-1 ztex-ufm1_15d4_121126-1 ztex-ufm1_15y1_121126-1 ) if [ -d "${reporoot}/${BITSTREAM_PKG_PATH}" ]; then ( for bs in ${BITSTREAMS[@]}; do if ! [ -r "${reporoot}/${BITSTREAM_PKG_PATH}/bitstream-${bs}_all.ipk" ]; then echo "Cannot find ${bs} bitstream package" >&2 exit 1 fi done ) else echo 'Cannot find bitstreams directory' >&2 exit 1 fi plat1='' for cfn in $vcfglist; do plat="$(perl -ple 's/^(\d+)\.config\.(\w+?)_\w+$/$2/ or $_=""' <<<"$cfn")" test -n "$plat" || continue platlist+=("$plat") cp -v "$vcfgdir/$cfn" .config yes '' | make oldconfig make {tools,toolchain}/install package/bfgminer/{clean,compile} mkdir "$reporoot/$plat" -pv cp -v "bin/$plat/packages/"b{fgminer,itforce}*_${plat}.ipk "$reporoot/$plat/" if [ -d "$reporoot/${BITSTREAM_PKG_PATH}" ]; then ( cd "$reporoot/$plat" for bs in ${BITSTREAMS[@]}; do ln -vfs "../${BITSTREAM_PKG_PATH}/bitstream-${bs}_all.ipk" . done ) fi staging_dir/host/bin/ipkg-make-index "$reporoot/$plat/" > "$reporoot/$plat/Packages" gzip -9 < "$reporoot/$plat/Packages" > "$reporoot/$plat/Packages.gz" done bfgminer-bfgminer-3.10.0/packaging/000077500000000000000000000000001226556647300171175ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/packaging/suse/000077500000000000000000000000001226556647300200765ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/packaging/suse/Makefile.am.patch000066400000000000000000000006431226556647300232330ustar00rootroot00000000000000--- a/Makefile.am 2013-04-06 14:46:37.955969119 +0200 +++ b/Makefile.am 2013-04-06 14:46:51.394965657 +0200 @@ -38,7 +38,7 @@ if NEED_LIBBLKMAKER SUBDIRS += libblkmaker bfgminer_CPPFLAGS += -Ilibblkmaker -bfgminer_LDFLAGS += -Llibblkmaker/.libs -Wl,-rpath,libblkmaker/.libs +bfgminer_LDFLAGS += -Llibblkmaker/.libs bfgminer_LDADD += -lblkmaker_jansson-0.1 -lblkmaker-0.1 if HAVE_CYGWIN bfgminer-bfgminer-3.10.0/packaging/suse/Makefile.in.patch000066400000000000000000000012561226556647300232450ustar00rootroot00000000000000--- a/Makefile.in 2013-04-04 18:03:11.198185097 +0200 +++ b/Makefile.in 2013-04-04 18:03:39.020202287 +0200 @@ -57,7 +57,7 @@ bin_PROGRAMS = bfgminer$(EXEEXT) $(am__EXEEXT_1) bfgminer-rpc$(EXEEXT) @NEED_LIBBLKMAKER_TRUE@am__append_1 = libblkmaker @NEED_LIBBLKMAKER_TRUE@am__append_2 = -Ilibblkmaker -@NEED_LIBBLKMAKER_TRUE@am__append_3 = -Llibblkmaker/.libs -Wl,-rpath,libblkmaker/.libs +@NEED_LIBBLKMAKER_TRUE@am__append_3 = -Llibblkmaker/.libs @NEED_LIBBLKMAKER_TRUE@am__append_4 = -lblkmaker-0.1 -lblkmaker_jansson-0.1 @HAVE_CYGWIN_TRUE@@NEED_LIBBLKMAKER_TRUE@am__append_5 = cygblkmaker-0.1-0.dll cygblkmaker_jansson-0.1-0.dll @HAS_SCRYPT_TRUE@am__append_6 = scrypt.c scrypt.h bfgminer-bfgminer-3.10.0/packaging/suse/bfgminer.changes000066400000000000000000000005001226556647300232140ustar00rootroot00000000000000------------------------------------------------------------------- Sat Apr 6 14:43:43 CEST 2013 - 
berendt@b1-systems.de - updated to version 3.0.0 ------------------------------------------------------------------- Thu Apr 4 17:06:56 CEST 2013 - berendt@b1-systems.de - initial package for bfgminer version 2.99.1 bfgminer-bfgminer-3.10.0/packaging/suse/bfgminer.rpmlintrc000066400000000000000000000004031226556647300236200ustar00rootroot00000000000000addFilter("devel-file-in-non-devel-package") addFilter("no-manual-page-for-binary") addFilter("wrong-script-end-of-line-encoding") addFilter("standard-dir-owned-by-package") addFilter("script-without-shebang") addFilter("binary-or-shlib-calls-gethostbyname") bfgminer-bfgminer-3.10.0/packaging/suse/bfgminer.spec000066400000000000000000000046441226556647300225530ustar00rootroot00000000000000# # Copyright (c) 2013 Christian Berendt. # # All modifications and additions to the file contributed by third parties # remain the property of their copyright owners, unless otherwise agreed # upon. The license for this file, and modifications and additions to the # file, is the same license as for the pristine package itself (unless the # license for the pristine package is not an Open Source License, in which # case the license is the MIT License). An "Open Source License" is a # license that conforms to the Open Source Definition (Version 1.9) # published by the Open Source Initiative. # # Please submit bugfixes or comments via http://bugs.opensuse.org/ # Name: bfgminer Version: 3.0.0 Release: 0 Summary: A BitCoin miner License: GPL-3.0 Group: Productivity/Other Url: https://github.com/luke-jr/bfgminer BuildRoot: %{_tmppath}/%{name}-%{version}-build Source: http://luke.dashjr.org/programs/bitcoin/files/bfgminer/%{version}/%{name}-%{version}.tbz2 Patch0: Makefile.in.patch Patch1: Makefile.am.patch BuildRequires: automake BuildRequires: libtool BuildRequires: pkg-config BuildRequires: make BuildRequires: gcc BuildRequires: yasm BuildRequires: libjansson-devel BuildRequires: libcurl-devel BuildRequires: libusb-devel BuildRequires: libudev-devel BuildRequires: ncurses-devel %description This is a multi-threaded multi-pool FPGA, GPU and CPU miner with ATI GPU monitoring, (over)clocking and fanspeed support for bitcoin and derivative coins. %package devel Summary: A BitCoin miner Group: Development/Libraries/C and C++ Requires: %{name} = %{version}-%{release} %description devel This is a multi-threaded multi-pool FPGA, GPU and CPU miner with ATI GPU monitoring, (over)clocking and fanspeed support for bitcoin and derivative coins. %prep %setup -q %patch0 -p1 %patch1 -p1 %configure \ --enable-cpumining \ --enable-scrypt %build make %{?_smp_mflags} %install %make_install install -d -m 755 %{buildroot}/%{_datadir}/%{name} mv %{buildroot}%{_bindir}/*.cl %{buildroot}/%{_datadir}/%{name} mv %{buildroot}%{_bindir}/bitstreams %{buildroot}/%{_datadir}/%{name} %clean rm -rf %{buildroot} %post -p /sbin/ldconfig %postun -p /sbin/ldconfig %files %defattr(-,root,root,-) %{_bindir}/* %{_libdir}/* %dir %{_datadir}/%{name} %{_datadir}/%{name}/* %files devel %defattr(-,root,root,-) %{_includedir}/* %changelog bfgminer-bfgminer-3.10.0/phatk121016.cl000066400000000000000000000314001226556647300172730ustar00rootroot00000000000000// This file is taken and modified from the public-domain poclbm project, and // I have therefore decided to keep it public-domain. 
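// Overview of what follows (summarised from the code, as the file carries no
// overview comment of its own): a fully unrolled double-SHA-256 search
// kernel.  The host passes state0..state7, presumably the midstate of the
// block header's first 64 bytes, together with a set of precomputed round
// values; each work item derives its nonce(s) from `base` plus its local and
// group ids, finishes both hash passes, and appends any nonce whose final
// hash word is zero to the output buffer via SETFOUND.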
// Modified version copyright 2011-2012 Con Kolivas #ifdef VECTORS4 typedef uint4 u; #elif defined VECTORS2 typedef uint2 u; #else typedef uint u; #endif __constant uint K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; __constant uint ConstW[128] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000U, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000280U, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x80000000U, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000100U, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }; __constant uint H[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; #ifdef BITALIGN #pragma OPENCL EXTENSION cl_amd_media_ops : enable #define rot(x, y) amd_bitalign(x, x, (uint)(32 - y)) // This part is not from the stock poclbm kernel. It's part of an optimization // added in the Phoenix Miner. // Some AMD devices have Vals[0] BFI_INT opcode, which behaves exactly like the // SHA-256 Ch function, but provides it in exactly one instruction. If // detected, use it for Ch. Otherwise, construct Ch out of simpler logical // primitives. #ifdef BFI_INT // Well, slight problem... It turns out BFI_INT isn't actually exposed to // OpenCL (or CAL IL for that matter) in any way. However, there is // a similar instruction, BYTE_ALIGN_INT, which is exposed to OpenCL via // amd_bytealign, takes the same inputs, and provides the same output. // We can use that as a placeholder for BFI_INT and have the application // patch it after compilation. 
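// A short worked illustration of the substitution described above (derived
// from the standard SHA-256 definitions, not an upstream comment): the
// choose function is
//   Ch(x, y, z) = (x & y) | (~x & z) = z ^ (x & (y ^ z))
// i.e. each result bit comes from y where x has a 1 bit and from z where x
// has a 0 bit.  BFI_INT performs exactly that bitfield insert in a single
// instruction, and bitselect((u)z, (u)y, (u)x) in the non-BFI_INT branch
// below computes the same selection.  amd_bytealign is used here only
// because it takes the same three operands; the miner rewrites its opcode to
// BFI_INT after compilation, so this path only produces correct hashes once
// that patch has been applied.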
// This is the BFI_INT function #define Ch(x, y, z) amd_bytealign(x,y,z) // Ma can also be implemented in terms of BFI_INT... #define Ma(z, x, y) amd_bytealign(z^x,y,x) #else // BFI_INT // Later SDKs optimise this to BFI INT without patching and GCN // actually fails if manually patched with BFI_INT #define Ch(x, y, z) bitselect((u)z, (u)y, (u)x) #define Ma(x, y, z) bitselect((u)x, (u)y, (u)z ^ (u)x) #define rotr(x, y) amd_bitalign((u)x, (u)x, (u)y) #endif #else // BITALIGN #define Ch(x, y, z) (z ^ (x & (y ^ z))) #define Ma(x, y, z) ((x & z) | (y & (x | z))) #define rot(x, y) rotate(x, y) #define rotr(x, y) rotate((u)x, (u)(32-y)) #endif //Various intermediate calculations for each SHA round #define s0(n) (S0(Vals[(0 + 128 - (n)) % 8])) #define S0(n) (rot(n, 30u)^rot(n, 19u)^rot(n,10u)) #define s1(n) (S1(Vals[(4 + 128 - (n)) % 8])) #define S1(n) (rot(n, 26u)^rot(n, 21u)^rot(n, 7u)) #define ch(n) Ch(Vals[(4 + 128 - (n)) % 8],Vals[(5 + 128 - (n)) % 8],Vals[(6 + 128 - (n)) % 8]) #define maj(n) Ma(Vals[(1 + 128 - (n)) % 8],Vals[(2 + 128 - (n)) % 8],Vals[(0 + 128 - (n)) % 8]) //t1 calc when W is already calculated #define t1(n) K[(n) % 64] + Vals[(7 + 128 - (n)) % 8] + W[(n)] + s1(n) + ch(n) //t1 calc which calculates W #define t1W(n) K[(n) % 64] + Vals[(7 + 128 - (n)) % 8] + W(n) + s1(n) + ch(n) //Used for constant W Values (the compiler optimizes out zeros) #define t1C(n) (K[(n) % 64]+ ConstW[(n)]) + Vals[(7 + 128 - (n)) % 8] + s1(n) + ch(n) //t2 Calc #define t2(n) maj(n) + s0(n) #define rotC(x,n) (x<> (32-n)) //W calculation used for SHA round #define W(n) (W[n] = P4(n) + P3(n) + P2(n) + P1(n)) //Partial W calculations (used for the begining where only some values are nonzero) #define P1(n) ((rot(W[(n)-2],15u)^rot(W[(n)-2],13u)^((W[(n)-2])>>10U))) #define P2(n) ((rot(W[(n)-15],25u)^rot(W[(n)-15],14u)^((W[(n)-15])>>3U))) #define p1(x) ((rot(x,15u)^rot(x,13u)^((x)>>10U))) #define p2(x) ((rot(x,25u)^rot(x,14u)^((x)>>3U))) #define P3(n) W[n-7] #define P4(n) W[n-16] //Partial Calcs for constant W values #define P1C(n) ((rotC(ConstW[(n)-2],15)^rotC(ConstW[(n)-2],13)^((ConstW[(n)-2])>>10U))) #define P2C(n) ((rotC(ConstW[(n)-15],25)^rotC(ConstW[(n)-15],14)^((ConstW[(n)-15])>>3U))) #define P3C(x) ConstW[x-7] #define P4C(x) ConstW[x-16] //SHA round with built in W calc #define sharoundW(n) Barrier1(n); Vals[(3 + 128 - (n)) % 8] += t1W(n); Vals[(7 + 128 - (n)) % 8] = t1W(n) + t2(n); //SHA round without W calc #define sharound(n) Barrier2(n); Vals[(3 + 128 - (n)) % 8] += t1(n); Vals[(7 + 128 - (n)) % 8] = t1(n) + t2(n); //SHA round for constant W values #define sharoundC(n) Barrier3(n); Vals[(3 + 128 - (n)) % 8] += t1C(n); Vals[(7 + 128 - (n)) % 8] = t1C(n) + t2(n); //The compiler is stupid... 
I put this in there only to stop the compiler from (de)optimizing the order #define Barrier1(n) t1 = t1C((n+1)) #define Barrier2(n) t1 = t1C((n)) #define Barrier3(n) t1 = t1C((n)) //#define WORKSIZE 256 #define MAXBUFFERS (4095) __kernel __attribute__((reqd_work_group_size(WORKSIZE, 1, 1))) void search( const uint state0, const uint state1, const uint state2, const uint state3, const uint state4, const uint state5, const uint state6, const uint state7, const uint B1, const uint C1, const uint D1, const uint F1, const uint G1, const uint H1, const u base, const uint W16, const uint W17, const uint PreVal4, const uint PreVal0, const uint PreW18, const uint PreW19, const uint PreW31, const uint PreW32, volatile __global uint * output) { u W[124]; u Vals[8]; //Dummy Variable to prevent compiler from reordering between rounds u t1; //Vals[0]=state0; Vals[1]=B1; Vals[2]=C1; Vals[3]=D1; //Vals[4]=PreVal4; Vals[5]=F1; Vals[6]=G1; Vals[7]=H1; W[16] = W16; W[17] = W17; #ifdef VECTORS4 //Less dependencies to get both the local id and group id and then add them W[3] = base + (uint)(get_local_id(0)) * 4u + (uint)(get_group_id(0)) * (WORKSIZE * 4u); uint r = rot(W[3].x,25u)^rot(W[3].x,14u)^((W[3].x)>>3U); //Since only the 2 LSB is opposite between the nonces, we can save an instruction by flipping the 4 bits in W18 rather than the 1 bit in W3 W[18] = PreW18 + (u){r, r ^ 0x2004000U, r ^ 0x4008000U, r ^ 0x600C000U}; #elif defined VECTORS2 W[3] = base + (uint)(get_local_id(0)) * 2u + (uint)(get_group_id(0)) * (WORKSIZE * 2u); uint r = rot(W[3].x,25u)^rot(W[3].x,14u)^((W[3].x)>>3U); W[18] = PreW18 + (u){r, r ^ 0x2004000U}; #else W[3] = base + get_local_id(0) + get_group_id(0) * (WORKSIZE); u r = rot(W[3],25u)^rot(W[3],14u)^((W[3])>>3U); W[18] = PreW18 + r; #endif //the order of the W calcs and Rounds is like this because the compiler needs help finding how to order the instructions Vals[4] = PreVal4 + W[3]; Vals[0] = PreVal0 + W[3]; sharoundC(4); W[19] = PreW19 + W[3]; sharoundC(5); W[20] = P4C(20) + P1(20); sharoundC(6); W[21] = P1(21); sharoundC(7); W[22] = P3C(22) + P1(22); sharoundC(8); W[23] = W[16] + P1(23); sharoundC(9); W[24] = W[17] + P1(24); sharoundC(10); W[25] = P1(25) + P3(25); W[26] = P1(26) + P3(26); sharoundC(11); W[27] = P1(27) + P3(27); W[28] = P1(28) + P3(28); sharoundC(12); W[29] = P1(29) + P3(29); sharoundC(13); W[30] = P1(30) + P2C(30) + P3(30); W[31] = PreW31 + (P1(31) + P3(31)); sharoundC(14); W[32] = PreW32 + (P1(32) + P3(32)); sharoundC(15); sharound(16); sharound(17); sharound(18); sharound(19); sharound(20); sharound(21); sharound(22); sharound(23); sharound(24); sharound(25); sharound(26); sharound(27); sharound(28); sharound(29); sharound(30); sharound(31); sharound(32); sharoundW(33); sharoundW(34); sharoundW(35); sharoundW(36); sharoundW(37); sharoundW(38); sharoundW(39); sharoundW(40); sharoundW(41); sharoundW(42); sharoundW(43); sharoundW(44); sharoundW(45); sharoundW(46); sharoundW(47); sharoundW(48); sharoundW(49); sharoundW(50); sharoundW(51); sharoundW(52); sharoundW(53); sharoundW(54); sharoundW(55); sharoundW(56); sharoundW(57); sharoundW(58); sharoundW(59); sharoundW(60); sharoundW(61); sharoundW(62); sharoundW(63); W[64]=state0+Vals[0]; W[65]=state1+Vals[1]; W[66]=state2+Vals[2]; W[67]=state3+Vals[3]; W[68]=state4+Vals[4]; W[69]=state5+Vals[5]; W[70]=state6+Vals[6]; W[71]=state7+Vals[7]; Vals[0]=H[0]; Vals[1]=H[1]; Vals[2]=H[2]; Vals[3]=H[3]; Vals[4]=H[4]; Vals[5]=H[5]; Vals[6]=H[6]; Vals[7]=H[7]; //sharound(64 + 0); const u Temp = (0xb0edbdd0U + K[0]) + W[64]; 
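// Reading of the constants here (derived by evaluating the fixed initial
// state, not an upstream comment): this is the commented-out
// sharound(64 + 0) just above, folded into literals.  For the second hash's
// initial state, h + S1(e) + ch(e, f, g) evaluates to 0xb0edbdd0, so Temp is
// round 0's t1; the next two lines then form the new "h" as
// t1 + t2, where t2 = S0(a) + Ma(a, b, c) = 0x08909ae5, and the new "d" as
// H[3] + t1, with H[3] = 0xa54ff53a.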
Vals[7] = Temp + 0x08909ae5U; Vals[3] = 0xa54ff53aU + Temp; #define P124(n) P2(n) + P1(n) + P4(n) W[64 + 16] = + P2(64 + 16) + P4(64 + 16); sharound(64 + 1); W[64 + 17] = P1C(64 + 17) + P2(64 + 17) + P4(64 + 17); sharound(64 + 2); W[64 + 18] = P124(64 + 18); sharound(64 + 3); W[64 + 19] = P124(64 + 19); sharound(64 + 4); W[64 + 20] = P124(64 + 20); sharound(64 + 5); W[64 + 21] = P124(64 + 21); sharound(64 + 6); W[64 + 22] = P4(64 + 22) + P3C(64 + 22) + P2(64 + 22) + P1(64 + 22); sharound(64 + 7); W[64 + 23] = P4(64 + 23) + P3(64 + 23) + P2C(64 + 23) + P1(64 + 23); sharoundC(64 + 8); W[64 + 24] = P1(64 + 24) + P4C(64 + 24) + P3(64 + 24); sharoundC(64 + 9); W[64 + 25] = P3(64 + 25) + P1(64 + 25); sharoundC(64 + 10); W[64 + 26] = P3(64 + 26) + P1(64 + 26); sharoundC(64 + 11); W[64 + 27] = P3(64 + 27) + P1(64 + 27); sharoundC(64 + 12); W[64 + 28] = P3(64 + 28) + P1(64 + 28); sharoundC(64 + 13); W[64 + 29] = P1(64 + 29) + P3(64 + 29); W[64 + 30] = P3(64 + 30) + P2C(64 + 30) + P1(64 + 30); sharoundC(64 + 14); W[64 + 31] = P4C(64 + 31) + P3(64 + 31) + P2(64 + 31) + P1(64 + 31); sharoundC(64 + 15); sharound(64 + 16); sharound(64 + 17); sharound(64 + 18); sharound(64 + 19); sharound(64 + 20); sharound(64 + 21); sharound(64 + 22); sharound(64 + 23); sharound(64 + 24); sharound(64 + 25); sharound(64 + 26); sharound(64 + 27); sharound(64 + 28); sharound(64 + 29); sharound(64 + 30); sharound(64 + 31); sharoundW(64 + 32); sharoundW(64 + 33); sharoundW(64 + 34); sharoundW(64 + 35); sharoundW(64 + 36); sharoundW(64 + 37); sharoundW(64 + 38); sharoundW(64 + 39); sharoundW(64 + 40); sharoundW(64 + 41); sharoundW(64 + 42); sharoundW(64 + 43); sharoundW(64 + 44); sharoundW(64 + 45); sharoundW(64 + 46); sharoundW(64 + 47); sharoundW(64 + 48); sharoundW(64 + 49); sharoundW(64 + 50); sharoundW(64 + 51); sharoundW(64 + 52); sharoundW(64 + 53); sharoundW(64 + 54); sharoundW(64 + 55); sharoundW(64 + 56); sharoundW(64 + 57); sharoundW(64 + 58); W[117] += W[108] + Vals[3] + Vals[7] + P2(124) + P1(124) + Ch((Vals[0] + Vals[4]) + (K[59] + W(59+64)) + s1(64+59)+ ch(59+64),Vals[1],Vals[2]) - (-(K[60] + H[7]) - S1((Vals[0] + Vals[4]) + (K[59] + W(59+64)) + s1(64+59)+ ch(59+64))); #define FOUND (0x0F) #define SETFOUND(Xnonce) output[output[FOUND]++] = Xnonce #ifdef VECTORS4 bool result = W[117].x & W[117].y & W[117].z & W[117].w; if (!result) { if (!W[117].x) SETFOUND(W[3].x); if (!W[117].y) SETFOUND(W[3].y); if (!W[117].z) SETFOUND(W[3].z); if (!W[117].w) SETFOUND(W[3].w); } #elif defined VECTORS2 bool result = W[117].x & W[117].y; if (!result) { if (!W[117].x) SETFOUND(W[3].x); if (!W[117].y) SETFOUND(W[3].y); } #else if (!W[117]) SETFOUND(W[3]); #endif } bfgminer-bfgminer-3.10.0/poclbm130302.cl000066400000000000000000001254421226556647300174500ustar00rootroot00000000000000// -ck modified kernel taken from Phoenix taken from poclbm, with aspects of // phatk and others. // Modified version copyright 2011-2012 Con Kolivas // This file is taken and modified from the public-domain poclbm project, and // we have therefore decided to keep it public-domain in Phoenix. 
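// Overview of the kernel below (derived from the source itself, not an
// upstream comment): the `u` typedef that follows makes every working value
// either a scalar uint or a uint2/uint4 vector, so one work-item can test
// one, two or four nonces per invocation depending on the VECTORS2/VECTORS4
// build defines, presumably with distinct per-lane offsets supplied in the
// vector `base` argument (or via the global work offset when GOFFSET is
// defined).  Both SHA-256 compressions are fully unrolled; each round is
// essentially the sequence
//   h += S1(e); h += ch(e,f,g); h += K[i] (+ W[i]); d += h; h += S0(a); h += Ma(a,b,c);
// written out with the a..h roles rotating through Vals[0..7] rather than
// values being moved between registers, and many early-round terms arrive
// pre-added as kernel arguments (PreVal4addT1, W16addK16 and friends) so the
// per-nonce work starts as late as possible.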
#ifdef VECTORS4 typedef uint4 u; #elif defined VECTORS2 typedef uint2 u; #else typedef uint u; #endif __constant uint K[87] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, 0xc19bf3f4U, 0x80000000U, 0x00000280U, 0x00a00055U, 0xf377ed68U, 0xa54ff53aU, 0x08909ae5U, 0x90bb1e3cU, 0x9b05688cU, 0xca0b3af3U, 0x3c6ef372U, 0xbb67ae85U, 0x6a09e667U, 0x50c6645bU, 0x510e527fU, 0x3ac42e24U, 0x5807aa98U, 0xc19bf274U, 0x00a00000U, 0x00000100U, 0x11002000U, 0x00400022U, 0x136032edU }; #define xc19bf3f4U K[64] #define x80000000U K[65] #define x00000280U K[66] #define x00a00055U K[67] #define xf377ed68U K[68] #define xa54ff53aU K[69] #define x08909ae5U K[70] #define x90bb1e3cU K[71] #define x9b05688cU K[72] #define xca0b3af3U K[73] #define x3c6ef372U K[74] #define xbb67ae85U K[75] #define x6a09e667U K[76] #define x50c6645bU K[77] #define x510e527fU K[78] #define x3ac42e24U K[79] #define x5807aa98U K[80] #define xc19bf274U K[81] #define x00a00000U K[82] #define x00000100U K[83] #define x11002000U K[84] #define x00400022U K[85] #define x136032edU K[86] // This part is not from the stock poclbm kernel. It's part of an optimization // added in the Phoenix Miner. // Some AMD devices have a BFI_INT opcode, which behaves exactly like the // SHA-256 ch function, but provides it in exactly one instruction. If // detected, use it for ch. Otherwise, construct ch out of simpler logical // primitives. #ifdef BITALIGN #pragma OPENCL EXTENSION cl_amd_media_ops : enable #define rotr(x, y) amd_bitalign((u)x, (u)x, (u)y) #else #define rotr(x, y) rotate((u)x, (u)(32 - y)) #endif #ifdef BFI_INT // Well, slight problem... It turns out BFI_INT isn't actually exposed to // OpenCL (or CAL IL for that matter) in any way. However, there is // a similar instruction, BYTE_ALIGN_INT, which is exposed to OpenCL via // amd_bytealign, takes the same inputs, and provides the same output. // We can use that as a placeholder for BFI_INT and have the application // patch it after compilation. // This is the BFI_INT function #define ch(x, y, z) amd_bytealign(x, y, z) // Ma can also be implemented in terms of BFI_INT... #define Ma(x, y, z) amd_bytealign( (z^x), (y), (x) ) // AMD's KernelAnalyzer throws errors compiling the kernel if we use // amd_bytealign on constants with vectors enabled, so we use this to avoid // problems. (this is used 4 times, and likely optimized out by the compiler.) 
#define Ma2(x, y, z) bitselect((u)x, (u)y, (u)z ^ (u)x) #else // BFI_INT //GCN actually fails if manually patched with BFI_INT #define ch(x, y, z) bitselect((u)z, (u)y, (u)x) #define Ma(x, y, z) bitselect((u)x, (u)y, (u)z ^ (u)x) #define Ma2(x, y, z) Ma(x, y, z) #endif __kernel __attribute__((vec_type_hint(u))) __attribute__((reqd_work_group_size(WORKSIZE, 1, 1))) void search(const uint state0, const uint state1, const uint state2, const uint state3, const uint state4, const uint state5, const uint state6, const uint state7, const uint b1, const uint c1, const uint f1, const uint g1, const uint h1, #ifndef GOFFSET const u base, #endif const uint fw0, const uint fw1, const uint fw2, const uint fw3, const uint fw15, const uint fw01r, const uint D1A, const uint C1addK5, const uint B1addK6, const uint W16addK16, const uint W17addK17, const uint PreVal4addT1, const uint Preval0, volatile __global uint * output) { u Vals[24]; u *W = &Vals[8]; #ifdef GOFFSET const u nonce = (uint)(get_global_id(0)); #else const u nonce = base + (uint)(get_global_id(0)); #endif Vals[5]=Preval0; Vals[5]+=nonce; Vals[0]=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],b1,c1); Vals[0]+=D1A; Vals[2]=Vals[0]; Vals[2]+=h1; Vals[1]=PreVal4addT1; Vals[1]+=nonce; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[6]=C1addK5; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],b1); Vals[3]=Vals[6]; Vals[3]+=g1; Vals[0]+=Ma2(g1,Vals[1],f1); Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma2(f1,Vals[0],Vals[1]); Vals[7]=B1addK6; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[4]=Vals[7]; Vals[4]+=f1; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[7]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[8]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[9]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[10]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[11]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[12]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[13]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); 
Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[14]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=xc19bf3f4U; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=W16addK16; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=W17addK17; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]=(rotr(nonce,7)^rotr(nonce,18)^(nonce>>3U)); W[2]+=fw2; Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[18]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]=nonce; W[3]+=fw3; Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[19]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); W[4]+=x80000000U; Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[20]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[21]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[6]=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); W[6]+=x00000280U; Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[22]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); W[7]+=fw0; Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[23]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); W[8]+=fw1; Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[24]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[9]=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[25]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[10]=W[3]; W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[26]; Vals[7]+=Vals[4]; 
Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[11]=W[4]; W[11]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=W[11]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[27]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[12]=W[5]; W[12]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[0]+=W[12]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[28]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[13]=W[6]; W[13]+=(rotr(W[11],17)^rotr(W[11],19)^(W[11]>>10U)); Vals[6]+=W[13]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[29]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[14]=x00a00055U; W[14]+=W[7]; W[14]+=(rotr(W[12],17)^rotr(W[12],19)^(W[12]>>10U)); Vals[7]+=W[14]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[30]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[15]=fw15; W[15]+=W[8]; W[15]+=(rotr(W[13],17)^rotr(W[13],19)^(W[13]>>10U)); Vals[5]+=W[15]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[31]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[0]=fw01r; W[0]+=W[9]; W[0]+=(rotr(W[14],17)^rotr(W[14],19)^(W[14]>>10U)); Vals[2]+=W[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[32]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[1]=fw1; W[1]+=(rotr(W[2],7)^rotr(W[2],18)^(W[2]>>3U)); W[1]+=W[10]; W[1]+=(rotr(W[15],17)^rotr(W[15],19)^(W[15]>>10U)); Vals[3]+=W[1]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[33]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]+=(rotr(W[3],7)^rotr(W[3],18)^(W[3]>>3U)); W[2]+=W[11]; W[2]+=(rotr(W[0],17)^rotr(W[0],19)^(W[0]>>10U)); Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[34]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]+=(rotr(W[4],7)^rotr(W[4],18)^(W[4]>>3U)); W[3]+=W[12]; W[3]+=(rotr(W[1],17)^rotr(W[1],19)^(W[1]>>10U)); Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[35]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]+=(rotr(W[5],7)^rotr(W[5],18)^(W[5]>>3U)); W[4]+=W[13]; W[4]+=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[36]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]+=(rotr(W[6],7)^rotr(W[6],18)^(W[6]>>3U)); W[5]+=W[14]; 
W[5]+=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[37]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[6]+=(rotr(W[7],7)^rotr(W[7],18)^(W[7]>>3U)); W[6]+=W[15]; W[6]+=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[38]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]+=(rotr(W[8],7)^rotr(W[8],18)^(W[8]>>3U)); W[7]+=W[0]; W[7]+=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[39]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]+=(rotr(W[9],7)^rotr(W[9],18)^(W[9]>>3U)); W[8]+=W[1]; W[8]+=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[40]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[9]+=(rotr(W[10],7)^rotr(W[10],18)^(W[10]>>3U)); W[9]+=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[41]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[10]+=(rotr(W[11],7)^rotr(W[11],18)^(W[11]>>3U)); W[10]+=W[3]; W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[42]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[11]+=(rotr(W[12],7)^rotr(W[12],18)^(W[12]>>3U)); W[11]+=W[4]; W[11]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=W[11]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[43]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[12]+=(rotr(W[13],7)^rotr(W[13],18)^(W[13]>>3U)); W[12]+=W[5]; W[12]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[0]+=W[12]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[44]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[13]+=(rotr(W[14],7)^rotr(W[14],18)^(W[14]>>3U)); W[13]+=W[6]; W[13]+=(rotr(W[11],17)^rotr(W[11],19)^(W[11]>>10U)); Vals[6]+=W[13]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[45]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[14]+=(rotr(W[15],7)^rotr(W[15],18)^(W[15]>>3U)); W[14]+=W[7]; W[14]+=(rotr(W[12],17)^rotr(W[12],19)^(W[12]>>10U)); Vals[7]+=W[14]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[46]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); 
Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[15]+=(rotr(W[0],7)^rotr(W[0],18)^(W[0]>>3U)); W[15]+=W[8]; W[15]+=(rotr(W[13],17)^rotr(W[13],19)^(W[13]>>10U)); Vals[5]+=W[15]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[47]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[0]+=(rotr(W[1],7)^rotr(W[1],18)^(W[1]>>3U)); W[0]+=W[9]; W[0]+=(rotr(W[14],17)^rotr(W[14],19)^(W[14]>>10U)); Vals[2]+=W[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[48]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[1]+=(rotr(W[2],7)^rotr(W[2],18)^(W[2]>>3U)); W[1]+=W[10]; W[1]+=(rotr(W[15],17)^rotr(W[15],19)^(W[15]>>10U)); Vals[3]+=W[1]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[49]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]+=(rotr(W[3],7)^rotr(W[3],18)^(W[3]>>3U)); W[2]+=W[11]; W[2]+=(rotr(W[0],17)^rotr(W[0],19)^(W[0]>>10U)); Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[50]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]+=(rotr(W[4],7)^rotr(W[4],18)^(W[4]>>3U)); W[3]+=W[12]; W[3]+=(rotr(W[1],17)^rotr(W[1],19)^(W[1]>>10U)); Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[51]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]+=(rotr(W[5],7)^rotr(W[5],18)^(W[5]>>3U)); W[4]+=W[13]; W[4]+=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[52]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]+=(rotr(W[6],7)^rotr(W[6],18)^(W[6]>>3U)); W[5]+=W[14]; W[5]+=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[53]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[6]+=(rotr(W[7],7)^rotr(W[7],18)^(W[7]>>3U)); W[6]+=W[15]; W[6]+=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[54]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]+=(rotr(W[8],7)^rotr(W[8],18)^(W[8]>>3U)); W[7]+=W[0]; W[7]+=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[55]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]+=(rotr(W[9],7)^rotr(W[9],18)^(W[9]>>3U)); W[8]+=W[1]; W[8]+=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[56]; Vals[0]+=Vals[2]; 
Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[9]+=(rotr(W[10],7)^rotr(W[10],18)^(W[10]>>3U)); W[9]+=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[57]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[10]+=(rotr(W[11],7)^rotr(W[11],18)^(W[11]>>3U)); W[10]+=W[3]; W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[58]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[11]+=(rotr(W[12],7)^rotr(W[12],18)^(W[12]>>3U)); W[11]+=W[4]; W[11]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=W[11]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[59]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[12]+=(rotr(W[13],7)^rotr(W[13],18)^(W[13]>>3U)); W[12]+=W[5]; W[12]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[0]+=W[12]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[60]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[13]+=(rotr(W[14],7)^rotr(W[14],18)^(W[14]>>3U)); W[13]+=W[6]; W[13]+=(rotr(W[11],17)^rotr(W[11],19)^(W[11]>>10U)); Vals[6]+=W[13]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[61]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); Vals[7]+=W[14]; Vals[7]+=(rotr(W[15],7)^rotr(W[15],18)^(W[15]>>3U)); Vals[7]+=W[7]; Vals[7]+=(rotr(W[12],17)^rotr(W[12],19)^(W[12]>>10U)); Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[62]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); Vals[5]+=W[15]; Vals[5]+=(rotr(W[0],7)^rotr(W[0],18)^(W[0]>>3U)); Vals[5]+=W[8]; Vals[5]+=(rotr(W[13],17)^rotr(W[13],19)^(W[13]>>10U)); Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[63]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); Vals[5]+=state0; W[7]=state7; W[7]+=Vals[2]; Vals[2]=xf377ed68U; Vals[2]+=Vals[5]; W[0]=Vals[5]; Vals[5]=x6a09e667U; W[3]=state3; W[3]+=Vals[0]; Vals[0]=xa54ff53aU; Vals[0]+=Vals[2]; Vals[2]+=x08909ae5U; W[6]=state6; W[6]+=Vals[3]; Vals[3]=x90bb1e3cU; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=(x9b05688cU^(Vals[0]&xca0b3af3U)); Vals[7]+=state1; Vals[3]+=Vals[7]; W[1]=Vals[7]; Vals[7]=xbb67ae85U; W[2]=state2; W[2]+=Vals[6]; Vals[6]=x3c6ef372U; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma2(Vals[7],Vals[2],Vals[5]); W[5]=state5; W[5]+=Vals[4]; Vals[4]=x50c6645bU; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],x510e527fU); Vals[4]+=W[2]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma2(Vals[5],Vals[3],Vals[2]); 
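/* At this point the first compression has been fed forward (state0..state7 added in), and its 32-byte digest becomes the message words W[0..7] of the second SHA-256. That second compression starts from the standard initial value, so its opening rounds are partially precomputed, hence the literal constants and the Ma2() variant used above. */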
W[4]=state4; W[4]+=Vals[1]; Vals[1]=x3ac42e24U; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=W[3]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[4]; Vals[0]+=W[4]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[5]; Vals[6]+=W[5]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[6]; Vals[7]+=W[6]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[7]; Vals[5]+=W[7]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=x5807aa98U; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[9]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[10]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[11]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[12]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[13]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[14]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=xc19bf274U; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[0]+=(rotr(W[1],7)^rotr(W[1],18)^(W[1]>>3U)); Vals[2]+=W[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[16]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[1]+=(rotr(W[2],7)^rotr(W[2],18)^(W[2]>>3U)); W[1]+=x00a00000U; Vals[3]+=W[1]; 
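/* In this second compression the message is only eight words, so rounds whose schedule words are the fixed padding and length values use folded literals: 0x5807aa98 = K[8] + 0x80000000 and 0xc19bf274 = K[15] + 0x00000100, while rounds whose message words are zero add only the round constant. */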
Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[17]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]+=(rotr(W[3],7)^rotr(W[3],18)^(W[3]>>3U)); W[2]+=(rotr(W[0],17)^rotr(W[0],19)^(W[0]>>10U)); Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[18]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]+=(rotr(W[4],7)^rotr(W[4],18)^(W[4]>>3U)); W[3]+=(rotr(W[1],17)^rotr(W[1],19)^(W[1]>>10U)); Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[19]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]+=(rotr(W[5],7)^rotr(W[5],18)^(W[5]>>3U)); W[4]+=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[20]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]+=(rotr(W[6],7)^rotr(W[6],18)^(W[6]>>3U)); W[5]+=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[21]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[6]+=(rotr(W[7],7)^rotr(W[7],18)^(W[7]>>3U)); W[6]+=x00000100U; W[6]+=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[22]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]+=x11002000U; W[7]+=W[0]; W[7]+=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[23]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]=x80000000U; W[8]+=W[1]; W[8]+=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[24]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[9]=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[25]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[10]=W[3]; W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[26]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[11]=W[4]; W[11]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=W[11]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[27]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); 
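/* Schedule expansion over the fixed tail words likewise uses precomputed literals, e.g. 0x00a00000 = s1(0x00000100) and 0x11002000 = s0(0x80000000); terms that are zero are simply omitted. */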
Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[12]=W[5]; W[12]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[0]+=W[12]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[28]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[13]=W[6]; W[13]+=(rotr(W[11],17)^rotr(W[11],19)^(W[11]>>10U)); Vals[6]+=W[13]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[29]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[14]=x00400022U; W[14]+=W[7]; W[14]+=(rotr(W[12],17)^rotr(W[12],19)^(W[12]>>10U)); Vals[7]+=W[14]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[30]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[15]=x00000100U; W[15]+=(rotr(W[0],7)^rotr(W[0],18)^(W[0]>>3U)); W[15]+=W[8]; W[15]+=(rotr(W[13],17)^rotr(W[13],19)^(W[13]>>10U)); Vals[5]+=W[15]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[31]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[0]+=(rotr(W[1],7)^rotr(W[1],18)^(W[1]>>3U)); W[0]+=W[9]; W[0]+=(rotr(W[14],17)^rotr(W[14],19)^(W[14]>>10U)); Vals[2]+=W[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[32]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[1]+=(rotr(W[2],7)^rotr(W[2],18)^(W[2]>>3U)); W[1]+=W[10]; W[1]+=(rotr(W[15],17)^rotr(W[15],19)^(W[15]>>10U)); Vals[3]+=W[1]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[33]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]+=(rotr(W[3],7)^rotr(W[3],18)^(W[3]>>3U)); W[2]+=W[11]; W[2]+=(rotr(W[0],17)^rotr(W[0],19)^(W[0]>>10U)); Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[34]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]+=(rotr(W[4],7)^rotr(W[4],18)^(W[4]>>3U)); W[3]+=W[12]; W[3]+=(rotr(W[1],17)^rotr(W[1],19)^(W[1]>>10U)); Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[35]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]+=(rotr(W[5],7)^rotr(W[5],18)^(W[5]>>3U)); W[4]+=W[13]; W[4]+=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[36]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]+=(rotr(W[6],7)^rotr(W[6],18)^(W[6]>>3U)); W[5]+=W[14]; W[5]+=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[37]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); 
W[6]+=(rotr(W[7],7)^rotr(W[7],18)^(W[7]>>3U)); W[6]+=W[15]; W[6]+=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[38]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]+=(rotr(W[8],7)^rotr(W[8],18)^(W[8]>>3U)); W[7]+=W[0]; W[7]+=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[39]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]+=(rotr(W[9],7)^rotr(W[9],18)^(W[9]>>3U)); W[8]+=W[1]; W[8]+=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[40]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[9]+=(rotr(W[10],7)^rotr(W[10],18)^(W[10]>>3U)); W[9]+=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[41]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[10]+=(rotr(W[11],7)^rotr(W[11],18)^(W[11]>>3U)); W[10]+=W[3]; W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[42]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[11]+=(rotr(W[12],7)^rotr(W[12],18)^(W[12]>>3U)); W[11]+=W[4]; W[11]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=W[11]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[43]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[12]+=(rotr(W[13],7)^rotr(W[13],18)^(W[13]>>3U)); W[12]+=W[5]; W[12]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[0]+=W[12]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[44]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[13]+=(rotr(W[14],7)^rotr(W[14],18)^(W[14]>>3U)); W[13]+=W[6]; W[13]+=(rotr(W[11],17)^rotr(W[11],19)^(W[11]>>10U)); Vals[6]+=W[13]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[45]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[14]+=(rotr(W[15],7)^rotr(W[15],18)^(W[15]>>3U)); W[14]+=W[7]; W[14]+=(rotr(W[12],17)^rotr(W[12],19)^(W[12]>>10U)); Vals[7]+=W[14]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[46]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[15]+=(rotr(W[0],7)^rotr(W[0],18)^(W[0]>>3U)); W[15]+=W[8]; W[15]+=(rotr(W[13],17)^rotr(W[13],19)^(W[13]>>10U)); Vals[5]+=W[15]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[47]; Vals[1]+=Vals[5]; 
Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[0]+=(rotr(W[1],7)^rotr(W[1],18)^(W[1]>>3U)); W[0]+=W[9]; W[0]+=(rotr(W[14],17)^rotr(W[14],19)^(W[14]>>10U)); Vals[2]+=W[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[48]; Vals[0]+=Vals[2]; Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); W[1]+=(rotr(W[2],7)^rotr(W[2],18)^(W[2]>>3U)); W[1]+=W[10]; W[1]+=(rotr(W[15],17)^rotr(W[15],19)^(W[15]>>10U)); Vals[3]+=W[1]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[49]; Vals[6]+=Vals[3]; Vals[3]+=(rotr(Vals[2],2)^rotr(Vals[2],13)^rotr(Vals[2],22)); Vals[3]+=Ma(Vals[7],Vals[2],Vals[5]); W[2]+=(rotr(W[3],7)^rotr(W[3],18)^(W[3]>>3U)); W[2]+=W[11]; W[2]+=(rotr(W[0],17)^rotr(W[0],19)^(W[0]>>10U)); Vals[4]+=W[2]; Vals[4]+=(rotr(Vals[6],6)^rotr(Vals[6],11)^rotr(Vals[6],25)); Vals[4]+=ch(Vals[6],Vals[0],Vals[1]); Vals[4]+=K[50]; Vals[7]+=Vals[4]; Vals[4]+=(rotr(Vals[3],2)^rotr(Vals[3],13)^rotr(Vals[3],22)); Vals[4]+=Ma(Vals[5],Vals[3],Vals[2]); W[3]+=(rotr(W[4],7)^rotr(W[4],18)^(W[4]>>3U)); W[3]+=W[12]; W[3]+=(rotr(W[1],17)^rotr(W[1],19)^(W[1]>>10U)); Vals[1]+=W[3]; Vals[1]+=(rotr(Vals[7],6)^rotr(Vals[7],11)^rotr(Vals[7],25)); Vals[1]+=ch(Vals[7],Vals[6],Vals[0]); Vals[1]+=K[51]; Vals[5]+=Vals[1]; Vals[1]+=(rotr(Vals[4],2)^rotr(Vals[4],13)^rotr(Vals[4],22)); Vals[1]+=Ma(Vals[2],Vals[4],Vals[3]); W[4]+=(rotr(W[5],7)^rotr(W[5],18)^(W[5]>>3U)); W[4]+=W[13]; W[4]+=(rotr(W[2],17)^rotr(W[2],19)^(W[2]>>10U)); Vals[0]+=W[4]; Vals[0]+=(rotr(Vals[5],6)^rotr(Vals[5],11)^rotr(Vals[5],25)); Vals[0]+=ch(Vals[5],Vals[7],Vals[6]); Vals[0]+=K[52]; Vals[2]+=Vals[0]; Vals[0]+=(rotr(Vals[1],2)^rotr(Vals[1],13)^rotr(Vals[1],22)); Vals[0]+=Ma(Vals[3],Vals[1],Vals[4]); W[5]+=(rotr(W[6],7)^rotr(W[6],18)^(W[6]>>3U)); W[5]+=W[14]; W[5]+=(rotr(W[3],17)^rotr(W[3],19)^(W[3]>>10U)); Vals[6]+=W[5]; Vals[6]+=(rotr(Vals[2],6)^rotr(Vals[2],11)^rotr(Vals[2],25)); Vals[6]+=ch(Vals[2],Vals[5],Vals[7]); Vals[6]+=K[53]; Vals[3]+=Vals[6]; Vals[6]+=(rotr(Vals[0],2)^rotr(Vals[0],13)^rotr(Vals[0],22)); Vals[6]+=Ma(Vals[4],Vals[0],Vals[1]); W[6]+=(rotr(W[7],7)^rotr(W[7],18)^(W[7]>>3U)); W[6]+=W[15]; W[6]+=(rotr(W[4],17)^rotr(W[4],19)^(W[4]>>10U)); Vals[7]+=W[6]; Vals[7]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[7]+=ch(Vals[3],Vals[2],Vals[5]); Vals[7]+=K[54]; Vals[4]+=Vals[7]; Vals[7]+=(rotr(Vals[6],2)^rotr(Vals[6],13)^rotr(Vals[6],22)); Vals[7]+=Ma(Vals[1],Vals[6],Vals[0]); W[7]+=(rotr(W[8],7)^rotr(W[8],18)^(W[8]>>3U)); W[7]+=W[0]; W[7]+=(rotr(W[5],17)^rotr(W[5],19)^(W[5]>>10U)); Vals[5]+=W[7]; Vals[5]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[5]+=ch(Vals[4],Vals[3],Vals[2]); Vals[5]+=K[55]; Vals[1]+=Vals[5]; Vals[5]+=(rotr(Vals[7],2)^rotr(Vals[7],13)^rotr(Vals[7],22)); Vals[5]+=Ma(Vals[0],Vals[7],Vals[6]); W[8]+=(rotr(W[9],7)^rotr(W[9],18)^(W[9]>>3U)); W[8]+=W[1]; W[8]+=(rotr(W[6],17)^rotr(W[6],19)^(W[6]>>10U)); Vals[2]+=W[8]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); Vals[2]+=K[56]; Vals[0]+=Vals[2]; W[9]+=(rotr(W[10],7)^rotr(W[10],18)^(W[10]>>3U)); W[9]+=W[2]; W[9]+=(rotr(W[7],17)^rotr(W[7],19)^(W[7]>>10U)); Vals[3]+=W[9]; Vals[3]+=(rotr(Vals[0],6)^rotr(Vals[0],11)^rotr(Vals[0],25)); Vals[3]+=ch(Vals[0],Vals[1],Vals[4]); Vals[3]+=K[57]; Vals[3]+=Vals[6]; W[10]+=(rotr(W[11],7)^rotr(W[11],18)^(W[11]>>3U)); W[10]+=W[3]; 
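/* The closing rounds below are trimmed to just the additions needed for the final comparison: 0x136032ed equals -(K[60] + 0x5be0cd19) mod 2^32, so Vals[2] == 0x136032ed is equivalent to the last word of the double SHA-256 being zero (a difficulty-1 result), and any matching nonce is appended to the output buffer via SETFOUND. */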
W[10]+=(rotr(W[8],17)^rotr(W[8],19)^(W[8]>>10U)); Vals[4]+=W[10]; Vals[4]+=(rotr(Vals[3],6)^rotr(Vals[3],11)^rotr(Vals[3],25)); Vals[4]+=ch(Vals[3],Vals[0],Vals[1]); Vals[4]+=K[58]; Vals[4]+=Vals[7]; Vals[1]+=(rotr(Vals[4],6)^rotr(Vals[4],11)^rotr(Vals[4],25)); Vals[1]+=ch(Vals[4],Vals[3],Vals[0]); Vals[1]+=W[11]; Vals[1]+=(rotr(W[12],7)^rotr(W[12],18)^(W[12]>>3U)); Vals[1]+=W[4]; Vals[1]+=(rotr(W[9],17)^rotr(W[9],19)^(W[9]>>10U)); Vals[1]+=K[59]; Vals[1]+=Vals[5]; Vals[2]+=Ma(Vals[6],Vals[5],Vals[7]); Vals[2]+=(rotr(Vals[5],2)^rotr(Vals[5],13)^rotr(Vals[5],22)); Vals[2]+=W[12]; Vals[2]+=(rotr(W[13],7)^rotr(W[13],18)^(W[13]>>3U)); Vals[2]+=W[5]; Vals[2]+=(rotr(W[10],17)^rotr(W[10],19)^(W[10]>>10U)); Vals[2]+=Vals[0]; Vals[2]+=(rotr(Vals[1],6)^rotr(Vals[1],11)^rotr(Vals[1],25)); Vals[2]+=ch(Vals[1],Vals[4],Vals[3]); #define FOUND (0x0F) #define SETFOUND(Xnonce) output[output[FOUND]++] = Xnonce #if defined(VECTORS2) || defined(VECTORS4) if (any(Vals[2] == x136032edU)) { if (Vals[2].x == x136032edU) SETFOUND(nonce.x); if (Vals[2].y == x136032edU) SETFOUND(nonce.y); #if defined(VECTORS4) if (Vals[2].z == x136032edU) SETFOUND(nonce.z); if (Vals[2].w == x136032edU) SETFOUND(nonce.w); #endif } #else if (Vals[2] == x136032edU) SETFOUND(nonce); #endif } bfgminer-bfgminer-3.10.0/scrypt.c000066400000000000000000000332121226556647300166640ustar00rootroot00000000000000/*- * Copyright 2009 Colin Percival, 2011 ArtForz * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ #include "config.h" #include "miner.h" #include #include #include #include typedef struct SHA256Context { uint32_t state[8]; uint32_t buf[16]; } SHA256_CTX; /* * Encode a length len/4 vector of (uint32_t) into a length len vector of * (unsigned char) in big-endian form. Assumes len is a multiple of 4. 
*/ static inline void be32enc_vect(uint32_t *dst, const uint32_t *src, uint32_t len) { uint32_t i; for (i = 0; i < len; i++) dst[i] = htobe32(src[i]); } /* Elementary functions used by SHA256 */ #define Ch(x, y, z) ((x & (y ^ z)) ^ z) #define Maj(x, y, z) ((x & (y | z)) | (y & z)) #define SHR(x, n) (x >> n) #define ROTR(x, n) ((x >> n) | (x << (32 - n))) #define S0(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) #define S1(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) #define s0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3)) #define s1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHR(x, 10)) /* SHA256 round function */ #define RND(a, b, c, d, e, f, g, h, k) \ t0 = h + S1(e) + Ch(e, f, g) + k; \ t1 = S0(a) + Maj(a, b, c); \ d += t0; \ h = t0 + t1; /* Adjusted round function for rotating state */ #define RNDr(S, W, i, k) \ RND(S[(64 - i) % 8], S[(65 - i) % 8], \ S[(66 - i) % 8], S[(67 - i) % 8], \ S[(68 - i) % 8], S[(69 - i) % 8], \ S[(70 - i) % 8], S[(71 - i) % 8], \ W[i] + k) /* * SHA256 block compression function. The 256-bit state is transformed via * the 512-bit input block to produce a new state. */ static void SHA256_Transform(uint32_t * state, const uint32_t block[16], int swap) { uint32_t W[64]; uint32_t S[8]; uint32_t t0, t1; int i; /* 1. Prepare message schedule W. */ if(swap) for (i = 0; i < 16; i++) W[i] = htobe32(block[i]); else memcpy(W, block, 64); for (i = 16; i < 64; i += 2) { W[i] = s1(W[i - 2]) + W[i - 7] + s0(W[i - 15]) + W[i - 16]; W[i+1] = s1(W[i - 1]) + W[i - 6] + s0(W[i - 14]) + W[i - 15]; } /* 2. Initialize working variables. */ memcpy(S, state, 32); /* 3. Mix. */ RNDr(S, W, 0, 0x428a2f98); RNDr(S, W, 1, 0x71374491); RNDr(S, W, 2, 0xb5c0fbcf); RNDr(S, W, 3, 0xe9b5dba5); RNDr(S, W, 4, 0x3956c25b); RNDr(S, W, 5, 0x59f111f1); RNDr(S, W, 6, 0x923f82a4); RNDr(S, W, 7, 0xab1c5ed5); RNDr(S, W, 8, 0xd807aa98); RNDr(S, W, 9, 0x12835b01); RNDr(S, W, 10, 0x243185be); RNDr(S, W, 11, 0x550c7dc3); RNDr(S, W, 12, 0x72be5d74); RNDr(S, W, 13, 0x80deb1fe); RNDr(S, W, 14, 0x9bdc06a7); RNDr(S, W, 15, 0xc19bf174); RNDr(S, W, 16, 0xe49b69c1); RNDr(S, W, 17, 0xefbe4786); RNDr(S, W, 18, 0x0fc19dc6); RNDr(S, W, 19, 0x240ca1cc); RNDr(S, W, 20, 0x2de92c6f); RNDr(S, W, 21, 0x4a7484aa); RNDr(S, W, 22, 0x5cb0a9dc); RNDr(S, W, 23, 0x76f988da); RNDr(S, W, 24, 0x983e5152); RNDr(S, W, 25, 0xa831c66d); RNDr(S, W, 26, 0xb00327c8); RNDr(S, W, 27, 0xbf597fc7); RNDr(S, W, 28, 0xc6e00bf3); RNDr(S, W, 29, 0xd5a79147); RNDr(S, W, 30, 0x06ca6351); RNDr(S, W, 31, 0x14292967); RNDr(S, W, 32, 0x27b70a85); RNDr(S, W, 33, 0x2e1b2138); RNDr(S, W, 34, 0x4d2c6dfc); RNDr(S, W, 35, 0x53380d13); RNDr(S, W, 36, 0x650a7354); RNDr(S, W, 37, 0x766a0abb); RNDr(S, W, 38, 0x81c2c92e); RNDr(S, W, 39, 0x92722c85); RNDr(S, W, 40, 0xa2bfe8a1); RNDr(S, W, 41, 0xa81a664b); RNDr(S, W, 42, 0xc24b8b70); RNDr(S, W, 43, 0xc76c51a3); RNDr(S, W, 44, 0xd192e819); RNDr(S, W, 45, 0xd6990624); RNDr(S, W, 46, 0xf40e3585); RNDr(S, W, 47, 0x106aa070); RNDr(S, W, 48, 0x19a4c116); RNDr(S, W, 49, 0x1e376c08); RNDr(S, W, 50, 0x2748774c); RNDr(S, W, 51, 0x34b0bcb5); RNDr(S, W, 52, 0x391c0cb3); RNDr(S, W, 53, 0x4ed8aa4a); RNDr(S, W, 54, 0x5b9cca4f); RNDr(S, W, 55, 0x682e6ff3); RNDr(S, W, 56, 0x748f82ee); RNDr(S, W, 57, 0x78a5636f); RNDr(S, W, 58, 0x84c87814); RNDr(S, W, 59, 0x8cc70208); RNDr(S, W, 60, 0x90befffa); RNDr(S, W, 61, 0xa4506ceb); RNDr(S, W, 62, 0xbef9a3f7); RNDr(S, W, 63, 0xc67178f2); /* 4. 
Mix local working variables into global state */ for (i = 0; i < 8; i++) state[i] += S[i]; } static inline void SHA256_InitState(uint32_t * state) { /* Magic initialization constants */ state[0] = 0x6A09E667; state[1] = 0xBB67AE85; state[2] = 0x3C6EF372; state[3] = 0xA54FF53A; state[4] = 0x510E527F; state[5] = 0x9B05688C; state[6] = 0x1F83D9AB; state[7] = 0x5BE0CD19; } static const uint32_t passwdpad[12] = {0x00000080, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80020000}; static const uint32_t outerpad[8] = {0x80000000, 0, 0, 0, 0, 0, 0, 0x00000300}; /** * PBKDF2_SHA256(passwd, passwdlen, salt, saltlen, c, buf, dkLen): * Compute PBKDF2(passwd, salt, c, dkLen) using HMAC-SHA256 as the PRF, and * write the output to buf. The value dkLen must be at most 32 * (2^32 - 1). */ static inline void PBKDF2_SHA256_80_128(const uint32_t * passwd, uint32_t * buf) { SHA256_CTX PShictx, PShoctx; uint32_t tstate[8]; uint32_t ihash[8]; uint32_t i; uint32_t pad[16]; static const uint32_t innerpad[11] = {0x00000080, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xa0040000}; /* If Klen > 64, the key is really SHA256(K). */ SHA256_InitState(tstate); SHA256_Transform(tstate, passwd, 1); memcpy(pad, passwd+16, 16); memcpy(pad+4, passwdpad, 48); SHA256_Transform(tstate, pad, 1); memcpy(ihash, tstate, 32); SHA256_InitState(PShictx.state); for (i = 0; i < 8; i++) pad[i] = ihash[i] ^ 0x36363636; for (; i < 16; i++) pad[i] = 0x36363636; SHA256_Transform(PShictx.state, pad, 0); SHA256_Transform(PShictx.state, passwd, 1); be32enc_vect(PShictx.buf, passwd+16, 4); be32enc_vect(PShictx.buf+5, innerpad, 11); SHA256_InitState(PShoctx.state); for (i = 0; i < 8; i++) pad[i] = ihash[i] ^ 0x5c5c5c5c; for (; i < 16; i++) pad[i] = 0x5c5c5c5c; SHA256_Transform(PShoctx.state, pad, 0); memcpy(PShoctx.buf+8, outerpad, 32); /* Iterate through the blocks. */ for (i = 0; i < 4; i++) { uint32_t istate[8]; uint32_t ostate[8]; memcpy(istate, PShictx.state, 32); PShictx.buf[4] = i + 1; SHA256_Transform(istate, PShictx.buf, 0); memcpy(PShoctx.buf, istate, 32); memcpy(ostate, PShoctx.state, 32); SHA256_Transform(ostate, PShoctx.buf, 0); be32enc_vect(buf+i*8, ostate, 8); } } static inline void PBKDF2_SHA256_80_128_32(const uint32_t * passwd, const uint32_t * salt, uint32_t *ostate) { uint32_t tstate[8]; uint32_t ihash[8]; uint32_t i; /* Compute HMAC state after processing P and S. */ uint32_t pad[16]; static const uint32_t ihash_finalblk[16] = {0x00000001,0x80000000,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0x00000620}; /* If Klen > 64, the key is really SHA256(K). */ SHA256_InitState(tstate); SHA256_Transform(tstate, passwd, 1); memcpy(pad, passwd+16, 16); memcpy(pad+4, passwdpad, 48); SHA256_Transform(tstate, pad, 1); memcpy(ihash, tstate, 32); SHA256_InitState(ostate); for (i = 0; i < 8; i++) pad[i] = ihash[i] ^ 0x5c5c5c5c; for (; i < 16; i++) pad[i] = 0x5c5c5c5c; SHA256_Transform(ostate, pad, 0); SHA256_InitState(tstate); for (i = 0; i < 8; i++) pad[i] = ihash[i] ^ 0x36363636; for (; i < 16; i++) pad[i] = 0x36363636; SHA256_Transform(tstate, pad, 0); SHA256_Transform(tstate, salt, 1); SHA256_Transform(tstate, salt+16, 1); SHA256_Transform(tstate, ihash_finalblk, 0); memcpy(pad, tstate, 32); memcpy(pad+8, outerpad, 32); /* Feed the inner hash to the outer SHA256 operation. */ SHA256_Transform(ostate, pad, 0); } /** * salsa20_8(B): * Apply the salsa20/8 core to the provided block. 
*/ static inline void salsa20_8(uint32_t B[16], const uint32_t Bx[16]) { uint32_t x00,x01,x02,x03,x04,x05,x06,x07,x08,x09,x10,x11,x12,x13,x14,x15; size_t i; x00 = (B[ 0] ^= Bx[ 0]); x01 = (B[ 1] ^= Bx[ 1]); x02 = (B[ 2] ^= Bx[ 2]); x03 = (B[ 3] ^= Bx[ 3]); x04 = (B[ 4] ^= Bx[ 4]); x05 = (B[ 5] ^= Bx[ 5]); x06 = (B[ 6] ^= Bx[ 6]); x07 = (B[ 7] ^= Bx[ 7]); x08 = (B[ 8] ^= Bx[ 8]); x09 = (B[ 9] ^= Bx[ 9]); x10 = (B[10] ^= Bx[10]); x11 = (B[11] ^= Bx[11]); x12 = (B[12] ^= Bx[12]); x13 = (B[13] ^= Bx[13]); x14 = (B[14] ^= Bx[14]); x15 = (B[15] ^= Bx[15]); for (i = 0; i < 8; i += 2) { #define R(a,b) (((a) << (b)) | ((a) >> (32 - (b)))) /* Operate on columns. */ x04 ^= R(x00+x12, 7); x09 ^= R(x05+x01, 7); x14 ^= R(x10+x06, 7); x03 ^= R(x15+x11, 7); x08 ^= R(x04+x00, 9); x13 ^= R(x09+x05, 9); x02 ^= R(x14+x10, 9); x07 ^= R(x03+x15, 9); x12 ^= R(x08+x04,13); x01 ^= R(x13+x09,13); x06 ^= R(x02+x14,13); x11 ^= R(x07+x03,13); x00 ^= R(x12+x08,18); x05 ^= R(x01+x13,18); x10 ^= R(x06+x02,18); x15 ^= R(x11+x07,18); /* Operate on rows. */ x01 ^= R(x00+x03, 7); x06 ^= R(x05+x04, 7); x11 ^= R(x10+x09, 7); x12 ^= R(x15+x14, 7); x02 ^= R(x01+x00, 9); x07 ^= R(x06+x05, 9); x08 ^= R(x11+x10, 9); x13 ^= R(x12+x15, 9); x03 ^= R(x02+x01,13); x04 ^= R(x07+x06,13); x09 ^= R(x08+x11,13); x14 ^= R(x13+x12,13); x00 ^= R(x03+x02,18); x05 ^= R(x04+x07,18); x10 ^= R(x09+x08,18); x15 ^= R(x14+x13,18); #undef R } B[ 0] += x00; B[ 1] += x01; B[ 2] += x02; B[ 3] += x03; B[ 4] += x04; B[ 5] += x05; B[ 6] += x06; B[ 7] += x07; B[ 8] += x08; B[ 9] += x09; B[10] += x10; B[11] += x11; B[12] += x12; B[13] += x13; B[14] += x14; B[15] += x15; } /* cpu and memory intensive function to transform a 80 byte buffer into a 32 byte output scratchpad size needs to be at least 63 + (128 * r * p) + (256 * r + 64) + (128 * r * N) bytes */ static void scrypt_1024_1_1_256_sp(const uint32_t* input, char* scratchpad, uint32_t *ostate) { uint32_t * V; uint32_t X[32]; uint32_t i; uint32_t j; uint32_t k; uint64_t *p1, *p2; p1 = (uint64_t *)X; V = (uint32_t *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63)); PBKDF2_SHA256_80_128(input, X); for (i = 0; i < 1024; i += 2) { memcpy(&V[i * 32], X, 128); salsa20_8(&X[0], &X[16]); salsa20_8(&X[16], &X[0]); memcpy(&V[(i + 1) * 32], X, 128); salsa20_8(&X[0], &X[16]); salsa20_8(&X[16], &X[0]); } for (i = 0; i < 1024; i += 2) { j = X[16] & 1023; p2 = (uint64_t *)(&V[j * 32]); for(k = 0; k < 16; k++) p1[k] ^= p2[k]; salsa20_8(&X[0], &X[16]); salsa20_8(&X[16], &X[0]); j = X[16] & 1023; p2 = (uint64_t *)(&V[j * 32]); for(k = 0; k < 16; k++) p1[k] ^= p2[k]; salsa20_8(&X[0], &X[16]); salsa20_8(&X[16], &X[0]); } PBKDF2_SHA256_80_128_32(input, X, ostate); } /* 131583 rounded up to 4 byte alignment */ #define SCRATCHBUF_SIZE (131584) void scrypt_regenhash(struct work *work) { uint32_t data[20]; char *scratchbuf; uint32_t *nonce = (uint32_t *)(work->data + 76); uint32_t *ohash = (uint32_t *)(work->hash); be32enc_vect(data, (const uint32_t *)work->data, 19); data[19] = htobe32(*nonce); scratchbuf = alloca(SCRATCHBUF_SIZE); scrypt_1024_1_1_256_sp(data, scratchbuf, ohash); flip32(ohash, ohash); } static const uint32_t diff1targ = 0x0000ffff; /* Used externally as confirmation of correct OCL code */ int scrypt_test(unsigned char *pdata, const unsigned char *ptarget, uint32_t nonce) { uint32_t tmp_hash7, Htarg = le32toh(((const uint32_t *)ptarget)[7]); uint32_t data[20], ohash[8]; char *scratchbuf; be32enc_vect(data, (const uint32_t *)pdata, 19); data[19] = htobe32(nonce); scratchbuf = alloca(SCRATCHBUF_SIZE); 
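/* With the fixed parameters used here (N = 1024, r = 1, p = 1) the scratchpad-size formula above works out to 63 + 128 + 320 + 131072 = 131583 bytes, which SCRATCHBUF_SIZE rounds up to the 4-byte-aligned 131584. */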
scrypt_1024_1_1_256_sp(data, scratchbuf, ohash); tmp_hash7 = be32toh(ohash[7]); applog(LOG_DEBUG, "htarget %08lx diff1 %08lx hash %08lx", (long unsigned int)Htarg, (long unsigned int)diff1targ, (long unsigned int)tmp_hash7); if (tmp_hash7 > diff1targ) return -1; if (tmp_hash7 > Htarg) return 0; return 1; } bool scanhash_scrypt(struct thr_info *thr, const unsigned char __maybe_unused *pmidstate, unsigned char *pdata, unsigned char __maybe_unused *phash1, unsigned char __maybe_unused *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n) { uint32_t *nonce = (uint32_t *)(pdata + 76); char *scratchbuf; uint32_t data[20]; uint32_t tmp_hash7; uint32_t Htarg = le32toh(((const uint32_t *)ptarget)[7]); bool ret = false; be32enc_vect(data, (const uint32_t *)pdata, 19); scratchbuf = malloc(SCRATCHBUF_SIZE); if (unlikely(!scratchbuf)) { applog(LOG_ERR, "Failed to malloc scratchbuf in scanhash_scrypt"); return ret; } while(1) { uint32_t ostate[8]; *nonce = ++n; data[19] = htobe32(n); scrypt_1024_1_1_256_sp(data, scratchbuf, ostate); tmp_hash7 = be32toh(ostate[7]); if (unlikely(tmp_hash7 <= Htarg)) { ((uint32_t *)pdata)[19] = htobe32(n); *last_nonce = n; ret = true; break; } if (unlikely((n >= max_nonce) || thr->work_restart)) { *last_nonce = n; break; } } free(scratchbuf);; return ret; } bfgminer-bfgminer-3.10.0/scrypt.h000066400000000000000000000010621226556647300166670ustar00rootroot00000000000000#ifndef SCRYPT_H #define SCRYPT_H #include #include "miner.h" #ifdef USE_SCRYPT extern int scrypt_test(unsigned char *pdata, const unsigned char *ptarget, uint32_t nonce); extern void scrypt_regenhash(struct work *work); #else /* USE_SCRYPT */ static inline int scrypt_test(__maybe_unused unsigned char *pdata, __maybe_unused const unsigned char *ptarget, __maybe_unused uint32_t nonce) { return 0; } static inline void scrypt_regenhash(__maybe_unused struct work *work) { } #endif /* USE_SCRYPT */ #endif /* SCRYPT_H */ bfgminer-bfgminer-3.10.0/scrypt130511.cl000066400000000000000000000564211226556647300175220ustar00rootroot00000000000000/*- * Copyright 2009 Colin Percival, 2011 ArtForz, 2011 pooler, 2012 mtrlt, * 2012-2013 Con Kolivas. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * This file was originally written by Colin Percival as part of the Tarsnap * online backup system. */ __constant uint ES[2] = { 0x00FF00FF, 0xFF00FF00 }; __constant uint K[] = { 0x428a2f98U, 0x71374491U, 0xb5c0fbcfU, 0xe9b5dba5U, 0x3956c25bU, 0x59f111f1U, 0x923f82a4U, 0xab1c5ed5U, 0xd807aa98U, 0x12835b01U, 0x243185beU, // 10 0x550c7dc3U, 0x72be5d74U, 0x80deb1feU, 0x9bdc06a7U, 0xe49b69c1U, 0xefbe4786U, 0x0fc19dc6U, 0x240ca1ccU, 0x2de92c6fU, 0x4a7484aaU, // 20 0x5cb0a9dcU, 0x76f988daU, 0x983e5152U, 0xa831c66dU, 0xb00327c8U, 0xbf597fc7U, 0xc6e00bf3U, 0xd5a79147U, 0x06ca6351U, 0x14292967U, // 30 0x27b70a85U, 0x2e1b2138U, 0x4d2c6dfcU, 0x53380d13U, 0x650a7354U, 0x766a0abbU, 0x81c2c92eU, 0x92722c85U, 0xa2bfe8a1U, 0xa81a664bU, // 40 0xc24b8b70U, 0xc76c51a3U, 0xd192e819U, 0xd6990624U, 0xf40e3585U, 0x106aa070U, 0x19a4c116U, 0x1e376c08U, 0x2748774cU, 0x34b0bcb5U, // 50 0x391c0cb3U, 0x4ed8aa4aU, 0x5b9cca4fU, 0x682e6ff3U, 0x748f82eeU, 0x78a5636fU, 0x84c87814U, 0x8cc70208U, 0x90befffaU, 0xa4506cebU, // 60 0xbef9a3f7U, 0xc67178f2U, 0x98c7e2a2U, 0xfc08884dU, 0xcd2a11aeU, 0x510e527fU, 0x9b05688cU, 0xC3910C8EU, 0xfb6feee7U, 0x2a01a605U, // 70 0x0c2e12e0U, 0x4498517BU, 0x6a09e667U, 0xa4ce148bU, 0x95F61999U, 0xc19bf174U, 0xBB67AE85U, 0x3C6EF372U, 0xA54FF53AU, 0x1F83D9ABU, // 80 0x5BE0CD19U, 0x5C5C5C5CU, 0x36363636U, 0x80000000U, 0x000003FFU, 0x00000280U, 0x000004a0U, 0x00000300U }; #define rotl(x,y) rotate(x,y) #define Ch(x,y,z) bitselect(z,y,x) #define Maj(x,y,z) Ch((x^z),y,z) #define EndianSwap(n) (rotl(n & ES[0], 24U)|rotl(n & ES[1], 8U)) #define Tr2(x) (rotl(x, 30U) ^ rotl(x, 19U) ^ rotl(x, 10U)) #define Tr1(x) (rotl(x, 26U) ^ rotl(x, 21U) ^ rotl(x, 7U)) #define Wr2(x) (rotl(x, 25U) ^ rotl(x, 14U) ^ (x>>3U)) #define Wr1(x) (rotl(x, 15U) ^ rotl(x, 13U) ^ (x>>10U)) #define RND(a, b, c, d, e, f, g, h, k) \ h += Tr1(e); \ h += Ch(e, f, g); \ h += k; \ d += h; \ h += Tr2(a); \ h += Maj(a, b, c); void SHA256(uint4*restrict state0,uint4*restrict state1, const uint4 block0, const uint4 block1, const uint4 block2, const uint4 block3) { uint4 S0 = *state0; uint4 S1 = *state1; #define A S0.x #define B S0.y #define C S0.z #define D S0.w #define E S1.x #define F S1.y #define G S1.z #define H S1.w uint4 W[4]; W[ 0].x = block0.x; RND(A,B,C,D,E,F,G,H, W[0].x+ K[0]); W[ 0].y = block0.y; RND(H,A,B,C,D,E,F,G, W[0].y+ K[1]); W[ 0].z = block0.z; RND(G,H,A,B,C,D,E,F, W[0].z+ K[2]); W[ 0].w = block0.w; RND(F,G,H,A,B,C,D,E, W[0].w+ K[3]); W[ 1].x = block1.x; RND(E,F,G,H,A,B,C,D, W[1].x+ K[4]); W[ 1].y = block1.y; RND(D,E,F,G,H,A,B,C, W[1].y+ K[5]); W[ 1].z = block1.z; RND(C,D,E,F,G,H,A,B, W[1].z+ K[6]); W[ 1].w = block1.w; RND(B,C,D,E,F,G,H,A, W[1].w+ K[7]); W[ 2].x = block2.x; RND(A,B,C,D,E,F,G,H, W[2].x+ K[8]); W[ 2].y = block2.y; RND(H,A,B,C,D,E,F,G, W[2].y+ K[9]); W[ 2].z = block2.z; RND(G,H,A,B,C,D,E,F, W[2].z+ K[10]); W[ 2].w = block2.w; RND(F,G,H,A,B,C,D,E, W[2].w+ K[11]); W[ 3].x = block3.x; RND(E,F,G,H,A,B,C,D, W[3].x+ K[12]); W[ 3].y = block3.y; RND(D,E,F,G,H,A,B,C, W[3].y+ K[13]); W[ 3].z = block3.z; RND(C,D,E,F,G,H,A,B, W[3].z+ K[14]); W[ 3].w = block3.w; RND(B,C,D,E,F,G,H,A, W[3].w+ K[76]); W[ 0].x += Wr1(W[ 3].z) + W[ 2].y + Wr2(W[ 0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[15]); W[ 0].y += Wr1(W[ 3].w) + W[ 2].z + Wr2(W[ 0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[16]); W[ 0].z += Wr1(W[ 0].x) + W[ 2].w + Wr2(W[ 0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[17]); W[ 0].w += Wr1(W[ 0].y) + W[ 3].x + Wr2(W[ 1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[18]); W[ 1].x += Wr1(W[ 0].z) + W[ 3].y + Wr2(W[ 1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ 
K[19]); W[ 1].y += Wr1(W[ 0].w) + W[ 3].z + Wr2(W[ 1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[20]); W[ 1].z += Wr1(W[ 1].x) + W[ 3].w + Wr2(W[ 1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ K[21]); W[ 1].w += Wr1(W[ 1].y) + W[ 0].x + Wr2(W[ 2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[22]); W[ 2].x += Wr1(W[ 1].z) + W[ 0].y + Wr2(W[ 2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[23]); W[ 2].y += Wr1(W[ 1].w) + W[ 0].z + Wr2(W[ 2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[24]); W[ 2].z += Wr1(W[ 2].x) + W[ 0].w + Wr2(W[ 2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[25]); W[ 2].w += Wr1(W[ 2].y) + W[ 1].x + Wr2(W[ 3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[26]); W[ 3].x += Wr1(W[ 2].z) + W[ 1].y + Wr2(W[ 3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[27]); W[ 3].y += Wr1(W[ 2].w) + W[ 1].z + Wr2(W[ 3].z); RND(D,E,F,G,H,A,B,C, W[3].y+ K[28]); W[ 3].z += Wr1(W[ 3].x) + W[ 1].w + Wr2(W[ 3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[29]); W[ 3].w += Wr1(W[ 3].y) + W[ 2].x + Wr2(W[ 0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[30]); W[ 0].x += Wr1(W[ 3].z) + W[ 2].y + Wr2(W[ 0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[31]); W[ 0].y += Wr1(W[ 3].w) + W[ 2].z + Wr2(W[ 0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[32]); W[ 0].z += Wr1(W[ 0].x) + W[ 2].w + Wr2(W[ 0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[33]); W[ 0].w += Wr1(W[ 0].y) + W[ 3].x + Wr2(W[ 1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[34]); W[ 1].x += Wr1(W[ 0].z) + W[ 3].y + Wr2(W[ 1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ K[35]); W[ 1].y += Wr1(W[ 0].w) + W[ 3].z + Wr2(W[ 1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[36]); W[ 1].z += Wr1(W[ 1].x) + W[ 3].w + Wr2(W[ 1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ K[37]); W[ 1].w += Wr1(W[ 1].y) + W[ 0].x + Wr2(W[ 2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[38]); W[ 2].x += Wr1(W[ 1].z) + W[ 0].y + Wr2(W[ 2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[39]); W[ 2].y += Wr1(W[ 1].w) + W[ 0].z + Wr2(W[ 2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[40]); W[ 2].z += Wr1(W[ 2].x) + W[ 0].w + Wr2(W[ 2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[41]); W[ 2].w += Wr1(W[ 2].y) + W[ 1].x + Wr2(W[ 3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[42]); W[ 3].x += Wr1(W[ 2].z) + W[ 1].y + Wr2(W[ 3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[43]); W[ 3].y += Wr1(W[ 2].w) + W[ 1].z + Wr2(W[ 3].z); RND(D,E,F,G,H,A,B,C, W[3].y+ K[44]); W[ 3].z += Wr1(W[ 3].x) + W[ 1].w + Wr2(W[ 3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[45]); W[ 3].w += Wr1(W[ 3].y) + W[ 2].x + Wr2(W[ 0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[46]); W[ 0].x += Wr1(W[ 3].z) + W[ 2].y + Wr2(W[ 0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[47]); W[ 0].y += Wr1(W[ 3].w) + W[ 2].z + Wr2(W[ 0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[48]); W[ 0].z += Wr1(W[ 0].x) + W[ 2].w + Wr2(W[ 0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[49]); W[ 0].w += Wr1(W[ 0].y) + W[ 3].x + Wr2(W[ 1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[50]); W[ 1].x += Wr1(W[ 0].z) + W[ 3].y + Wr2(W[ 1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ K[51]); W[ 1].y += Wr1(W[ 0].w) + W[ 3].z + Wr2(W[ 1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[52]); W[ 1].z += Wr1(W[ 1].x) + W[ 3].w + Wr2(W[ 1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ K[53]); W[ 1].w += Wr1(W[ 1].y) + W[ 0].x + Wr2(W[ 2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[54]); W[ 2].x += Wr1(W[ 1].z) + W[ 0].y + Wr2(W[ 2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[55]); W[ 2].y += Wr1(W[ 1].w) + W[ 0].z + Wr2(W[ 2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[56]); W[ 2].z += Wr1(W[ 2].x) + W[ 0].w + Wr2(W[ 2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[57]); W[ 2].w += Wr1(W[ 2].y) + W[ 1].x + Wr2(W[ 3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[58]); W[ 3].x += Wr1(W[ 2].z) + W[ 1].y + Wr2(W[ 3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[59]); W[ 3].y += Wr1(W[ 2].w) + W[ 1].z + Wr2(W[ 3].z); 
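/* The rounds above and below operate on a single SHA-256 state packed into two uint4 vectors, state0 = (A,B,C,D) and state1 = (E,F,G,H), accessed per component through the A..H macros. */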
RND(D,E,F,G,H,A,B,C, W[3].y+ K[60]); W[ 3].z += Wr1(W[ 3].x) + W[ 1].w + Wr2(W[ 3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[61]); W[ 3].w += Wr1(W[ 3].y) + W[ 2].x + Wr2(W[ 0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[62]); #undef A #undef B #undef C #undef D #undef E #undef F #undef G #undef H *state0 += S0; *state1 += S1; } void SHA256_fresh(uint4*restrict state0,uint4*restrict state1, const uint4 block0, const uint4 block1, const uint4 block2, const uint4 block3) { #define A (*state0).x #define B (*state0).y #define C (*state0).z #define D (*state0).w #define E (*state1).x #define F (*state1).y #define G (*state1).z #define H (*state1).w uint4 W[4]; W[0].x = block0.x; D= K[63] +W[0].x; H= K[64] +W[0].x; W[0].y = block0.y; C= K[65] +Tr1(D)+Ch(D, K[66], K[67])+W[0].y; G= K[68] +C+Tr2(H)+Ch(H, K[69] ,K[70]); W[0].z = block0.z; B= K[71] +Tr1(C)+Ch(C,D,K[66])+W[0].z; F= K[72] +B+Tr2(G)+Maj(G,H, K[73]); W[0].w = block0.w; A= K[74] +Tr1(B)+Ch(B,C,D)+W[0].w; E= K[75] +A+Tr2(F)+Maj(F,G,H); W[1].x = block1.x; RND(E,F,G,H,A,B,C,D, W[1].x+ K[4]); W[1].y = block1.y; RND(D,E,F,G,H,A,B,C, W[1].y+ K[5]); W[1].z = block1.z; RND(C,D,E,F,G,H,A,B, W[1].z+ K[6]); W[1].w = block1.w; RND(B,C,D,E,F,G,H,A, W[1].w+ K[7]); W[2].x = block2.x; RND(A,B,C,D,E,F,G,H, W[2].x+ K[8]); W[2].y = block2.y; RND(H,A,B,C,D,E,F,G, W[2].y+ K[9]); W[2].z = block2.z; RND(G,H,A,B,C,D,E,F, W[2].z+ K[10]); W[2].w = block2.w; RND(F,G,H,A,B,C,D,E, W[2].w+ K[11]); W[3].x = block3.x; RND(E,F,G,H,A,B,C,D, W[3].x+ K[12]); W[3].y = block3.y; RND(D,E,F,G,H,A,B,C, W[3].y+ K[13]); W[3].z = block3.z; RND(C,D,E,F,G,H,A,B, W[3].z+ K[14]); W[3].w = block3.w; RND(B,C,D,E,F,G,H,A, W[3].w+ K[76]); W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[15]); W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[16]); W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[17]); W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[18]); W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ K[19]); W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[20]); W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ K[21]); W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[22]); W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[23]); W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[24]); W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[25]); W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[26]); W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[27]); W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z); RND(D,E,F,G,H,A,B,C, W[3].y+ K[28]); W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[29]); W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[30]); W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[31]); W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[32]); W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[33]); W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[34]); W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ K[35]); W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[36]); W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ 
K[37]); W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[38]); W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[39]); W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[40]); W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[41]); W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[42]); W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[43]); W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z); RND(D,E,F,G,H,A,B,C, W[3].y+ K[44]); W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[45]); W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[46]); W[0].x += Wr1(W[3].z) + W[2].y + Wr2(W[0].y); RND(A,B,C,D,E,F,G,H, W[0].x+ K[47]); W[0].y += Wr1(W[3].w) + W[2].z + Wr2(W[0].z); RND(H,A,B,C,D,E,F,G, W[0].y+ K[48]); W[0].z += Wr1(W[0].x) + W[2].w + Wr2(W[0].w); RND(G,H,A,B,C,D,E,F, W[0].z+ K[49]); W[0].w += Wr1(W[0].y) + W[3].x + Wr2(W[1].x); RND(F,G,H,A,B,C,D,E, W[0].w+ K[50]); W[1].x += Wr1(W[0].z) + W[3].y + Wr2(W[1].y); RND(E,F,G,H,A,B,C,D, W[1].x+ K[51]); W[1].y += Wr1(W[0].w) + W[3].z + Wr2(W[1].z); RND(D,E,F,G,H,A,B,C, W[1].y+ K[52]); W[1].z += Wr1(W[1].x) + W[3].w + Wr2(W[1].w); RND(C,D,E,F,G,H,A,B, W[1].z+ K[53]); W[1].w += Wr1(W[1].y) + W[0].x + Wr2(W[2].x); RND(B,C,D,E,F,G,H,A, W[1].w+ K[54]); W[2].x += Wr1(W[1].z) + W[0].y + Wr2(W[2].y); RND(A,B,C,D,E,F,G,H, W[2].x+ K[55]); W[2].y += Wr1(W[1].w) + W[0].z + Wr2(W[2].z); RND(H,A,B,C,D,E,F,G, W[2].y+ K[56]); W[2].z += Wr1(W[2].x) + W[0].w + Wr2(W[2].w); RND(G,H,A,B,C,D,E,F, W[2].z+ K[57]); W[2].w += Wr1(W[2].y) + W[1].x + Wr2(W[3].x); RND(F,G,H,A,B,C,D,E, W[2].w+ K[58]); W[3].x += Wr1(W[2].z) + W[1].y + Wr2(W[3].y); RND(E,F,G,H,A,B,C,D, W[3].x+ K[59]); W[3].y += Wr1(W[2].w) + W[1].z + Wr2(W[3].z); RND(D,E,F,G,H,A,B,C, W[3].y+ K[60]); W[3].z += Wr1(W[3].x) + W[1].w + Wr2(W[3].w); RND(C,D,E,F,G,H,A,B, W[3].z+ K[61]); W[3].w += Wr1(W[3].y) + W[2].x + Wr2(W[0].x); RND(B,C,D,E,F,G,H,A, W[3].w+ K[62]); #undef A #undef B #undef C #undef D #undef E #undef F #undef G #undef H *state0 += (uint4)(K[73], K[77], K[78], K[79]); *state1 += (uint4)(K[66], K[67], K[80], K[81]); } __constant uint fixedW[64] = { 0x428a2f99,0xf1374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5, 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf794, 0xf59b89c2,0x73924787,0x23c6886e,0xa42ca65c,0x15ed3627,0x4d6edcbf,0xe28217fc,0xef02488f, 0xb707775c,0x0468c23f,0xe7e72b4c,0x49e1f1a2,0x4b99c816,0x926d1570,0xaa0fc072,0xadb36e2c, 0xad87a3ea,0xbcb1d3a3,0x7b993186,0x562b9420,0xbff3ca0c,0xda4b0c23,0x6cd8711a,0x8f337caa, 0xc91b1417,0xc359dce1,0xa83253a7,0x3b13c12d,0x9d3d725d,0xd9031a84,0xb1a03340,0x16f58012, 0xe64fb6a2,0xe84d923a,0xe93a5730,0x09837686,0x078ff753,0x29833341,0xd5de0b7e,0x6948ccf4, 0xe0a1adbe,0x7c728e11,0x511c78e4,0x315b45bd,0xfca71413,0xea28f96a,0x79703128,0x4e1ef848, }; void SHA256_fixed(uint4*restrict state0,uint4*restrict state1) { uint4 S0 = *state0; uint4 S1 = *state1; #define A S0.x #define B S0.y #define C S0.z #define D S0.w #define E S1.x #define F S1.y #define G S1.z #define H S1.w RND(A,B,C,D,E,F,G,H, fixedW[0]); RND(H,A,B,C,D,E,F,G, fixedW[1]); RND(G,H,A,B,C,D,E,F, fixedW[2]); RND(F,G,H,A,B,C,D,E, fixedW[3]); RND(E,F,G,H,A,B,C,D, fixedW[4]); RND(D,E,F,G,H,A,B,C, fixedW[5]); RND(C,D,E,F,G,H,A,B, fixedW[6]); RND(B,C,D,E,F,G,H,A, fixedW[7]); RND(A,B,C,D,E,F,G,H, fixedW[8]); RND(H,A,B,C,D,E,F,G, fixedW[9]); 
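/* SHA256_fixed compresses a block whose contents are known in advance, so the entire 64-entry message schedule has been precomputed into fixedW[] and the remaining rounds simply consume it. */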
RND(G,H,A,B,C,D,E,F, fixedW[10]); RND(F,G,H,A,B,C,D,E, fixedW[11]); RND(E,F,G,H,A,B,C,D, fixedW[12]); RND(D,E,F,G,H,A,B,C, fixedW[13]); RND(C,D,E,F,G,H,A,B, fixedW[14]); RND(B,C,D,E,F,G,H,A, fixedW[15]); RND(A,B,C,D,E,F,G,H, fixedW[16]); RND(H,A,B,C,D,E,F,G, fixedW[17]); RND(G,H,A,B,C,D,E,F, fixedW[18]); RND(F,G,H,A,B,C,D,E, fixedW[19]); RND(E,F,G,H,A,B,C,D, fixedW[20]); RND(D,E,F,G,H,A,B,C, fixedW[21]); RND(C,D,E,F,G,H,A,B, fixedW[22]); RND(B,C,D,E,F,G,H,A, fixedW[23]); RND(A,B,C,D,E,F,G,H, fixedW[24]); RND(H,A,B,C,D,E,F,G, fixedW[25]); RND(G,H,A,B,C,D,E,F, fixedW[26]); RND(F,G,H,A,B,C,D,E, fixedW[27]); RND(E,F,G,H,A,B,C,D, fixedW[28]); RND(D,E,F,G,H,A,B,C, fixedW[29]); RND(C,D,E,F,G,H,A,B, fixedW[30]); RND(B,C,D,E,F,G,H,A, fixedW[31]); RND(A,B,C,D,E,F,G,H, fixedW[32]); RND(H,A,B,C,D,E,F,G, fixedW[33]); RND(G,H,A,B,C,D,E,F, fixedW[34]); RND(F,G,H,A,B,C,D,E, fixedW[35]); RND(E,F,G,H,A,B,C,D, fixedW[36]); RND(D,E,F,G,H,A,B,C, fixedW[37]); RND(C,D,E,F,G,H,A,B, fixedW[38]); RND(B,C,D,E,F,G,H,A, fixedW[39]); RND(A,B,C,D,E,F,G,H, fixedW[40]); RND(H,A,B,C,D,E,F,G, fixedW[41]); RND(G,H,A,B,C,D,E,F, fixedW[42]); RND(F,G,H,A,B,C,D,E, fixedW[43]); RND(E,F,G,H,A,B,C,D, fixedW[44]); RND(D,E,F,G,H,A,B,C, fixedW[45]); RND(C,D,E,F,G,H,A,B, fixedW[46]); RND(B,C,D,E,F,G,H,A, fixedW[47]); RND(A,B,C,D,E,F,G,H, fixedW[48]); RND(H,A,B,C,D,E,F,G, fixedW[49]); RND(G,H,A,B,C,D,E,F, fixedW[50]); RND(F,G,H,A,B,C,D,E, fixedW[51]); RND(E,F,G,H,A,B,C,D, fixedW[52]); RND(D,E,F,G,H,A,B,C, fixedW[53]); RND(C,D,E,F,G,H,A,B, fixedW[54]); RND(B,C,D,E,F,G,H,A, fixedW[55]); RND(A,B,C,D,E,F,G,H, fixedW[56]); RND(H,A,B,C,D,E,F,G, fixedW[57]); RND(G,H,A,B,C,D,E,F, fixedW[58]); RND(F,G,H,A,B,C,D,E, fixedW[59]); RND(E,F,G,H,A,B,C,D, fixedW[60]); RND(D,E,F,G,H,A,B,C, fixedW[61]); RND(C,D,E,F,G,H,A,B, fixedW[62]); RND(B,C,D,E,F,G,H,A, fixedW[63]); #undef A #undef B #undef C #undef D #undef E #undef F #undef G #undef H *state0 += S0; *state1 += S1; } void shittify(uint4 B[8]) { uint4 tmp[4]; tmp[0] = (uint4)(B[1].x,B[2].y,B[3].z,B[0].w); tmp[1] = (uint4)(B[2].x,B[3].y,B[0].z,B[1].w); tmp[2] = (uint4)(B[3].x,B[0].y,B[1].z,B[2].w); tmp[3] = (uint4)(B[0].x,B[1].y,B[2].z,B[3].w); #pragma unroll for(uint i=0; i<4; ++i) B[i] = EndianSwap(tmp[i]); tmp[0] = (uint4)(B[5].x,B[6].y,B[7].z,B[4].w); tmp[1] = (uint4)(B[6].x,B[7].y,B[4].z,B[5].w); tmp[2] = (uint4)(B[7].x,B[4].y,B[5].z,B[6].w); tmp[3] = (uint4)(B[4].x,B[5].y,B[6].z,B[7].w); #pragma unroll for(uint i=0; i<4; ++i) B[i+4] = EndianSwap(tmp[i]); } void unshittify(uint4 B[8]) { uint4 tmp[4]; tmp[0] = (uint4)(B[3].x,B[2].y,B[1].z,B[0].w); tmp[1] = (uint4)(B[0].x,B[3].y,B[2].z,B[1].w); tmp[2] = (uint4)(B[1].x,B[0].y,B[3].z,B[2].w); tmp[3] = (uint4)(B[2].x,B[1].y,B[0].z,B[3].w); #pragma unroll for(uint i=0; i<4; ++i) B[i] = EndianSwap(tmp[i]); tmp[0] = (uint4)(B[7].x,B[6].y,B[5].z,B[4].w); tmp[1] = (uint4)(B[4].x,B[7].y,B[6].z,B[5].w); tmp[2] = (uint4)(B[5].x,B[4].y,B[7].z,B[6].w); tmp[3] = (uint4)(B[6].x,B[5].y,B[4].z,B[7].w); #pragma unroll for(uint i=0; i<4; ++i) B[i+4] = EndianSwap(tmp[i]); } void salsa(uint4 B[8]) { uint4 w[4]; #pragma unroll for(uint i=0; i<4; ++i) w[i] = (B[i]^=B[i+4]); #pragma unroll for(uint i=0; i<4; ++i) { w[0] ^= rotl(w[3] +w[2] , 7U); w[1] ^= rotl(w[0] +w[3] , 9U); w[2] ^= rotl(w[1] +w[0] ,13U); w[3] ^= rotl(w[2] +w[1] ,18U); w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U); w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U); w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U); w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U); } #pragma unroll for(uint i=0; i<4; ++i) w[i] = (B[i+4]^=(B[i]+=w[i])); #pragma 
unroll for(uint i=0; i<4; ++i) { w[0] ^= rotl(w[3] +w[2] , 7U); w[1] ^= rotl(w[0] +w[3] , 9U); w[2] ^= rotl(w[1] +w[0] ,13U); w[3] ^= rotl(w[2] +w[1] ,18U); w[2] ^= rotl(w[3].wxyz+w[0].zwxy, 7U); w[1] ^= rotl(w[2].wxyz+w[3].zwxy, 9U); w[0] ^= rotl(w[1].wxyz+w[2].zwxy,13U); w[3] ^= rotl(w[0].wxyz+w[1].zwxy,18U); } #pragma unroll for(uint i=0; i<4; ++i) B[i+4] += w[i]; } #define Coord(x,y,z) x+y*(x ## SIZE)+z*(y ## SIZE)*(x ## SIZE) #define CO Coord(z,x,y) void scrypt_core(uint4 X[8], __global uint4*restrict lookup) { shittify(X); const uint zSIZE = 8; const uint ySIZE = (1024/LOOKUP_GAP+(1024%LOOKUP_GAP>0)); const uint xSIZE = CONCURRENT_THREADS; uint x = get_global_id(0)%xSIZE; for(uint y=0; y<1024/LOOKUP_GAP; ++y) { #pragma unroll for(uint z=0; z #include #include "sha2.h" #define UNPACK32(x, str) \ { \ *((str) + 3) = (uint8_t) ((x) ); \ *((str) + 2) = (uint8_t) ((x) >> 8); \ *((str) + 1) = (uint8_t) ((x) >> 16); \ *((str) + 0) = (uint8_t) ((x) >> 24); \ } #define PACK32(str, x) \ { \ *(x) = ((uint32_t) *((str) + 3) ) \ | ((uint32_t) *((str) + 2) << 8) \ | ((uint32_t) *((str) + 1) << 16) \ | ((uint32_t) *((str) + 0) << 24); \ } #define SHA256_SCR(i) \ { \ w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ + SHA256_F3(w[i - 15]) + w[i - 16]; \ } uint32_t sha256_h0[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; uint32_t sha256_k[64] = {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; /* SHA-256 functions */ void sha256_transf(sha256_ctx *ctx, const unsigned char *message, unsigned int block_nb) { uint32_t w[64]; uint32_t wv[8]; uint32_t t1, t2; const unsigned char *sub_block; int i; int j; for (i = 0; i < (int) block_nb; i++) { sub_block = message + (i << 6); for (j = 0; j < 16; j++) { PACK32(&sub_block[j << 2], &w[j]); } for (j = 16; j < 64; j++) { SHA256_SCR(j); } for (j = 0; j < 8; j++) { wv[j] = ctx->h[j]; } for (j = 0; j < 64; j++) { t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + sha256_k[j] + w[j]; t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); wv[7] = wv[6]; wv[6] = wv[5]; wv[5] = wv[4]; wv[4] = wv[3] + t1; wv[3] = wv[2]; wv[2] = wv[1]; wv[1] = wv[0]; wv[0] = t1 + t2; } for (j = 0; j < 8; j++) { ctx->h[j] += wv[j]; } } } void sha256(const unsigned char *message, unsigned int len, unsigned char *digest) { sha256_ctx ctx; sha256_init(&ctx); sha256_update(&ctx, message, len); sha256_final(&ctx, digest); } void sha256_init(sha256_ctx *ctx) { int i; for (i = 0; i < 8; i++) { ctx->h[i] = sha256_h0[i]; } ctx->len = 0; ctx->tot_len = 0; } void sha256_update(sha256_ctx *ctx, const unsigned char *message, unsigned int len) { unsigned int block_nb; unsigned int new_len, rem_len, tmp_len; const unsigned char *shifted_message; tmp_len = SHA256_BLOCK_SIZE - ctx->len; rem_len = len < tmp_len ? 
len : tmp_len; memcpy(&ctx->block[ctx->len], message, rem_len); if (ctx->len + len < SHA256_BLOCK_SIZE) { ctx->len += len; return; } new_len = len - rem_len; block_nb = new_len / SHA256_BLOCK_SIZE; shifted_message = message + rem_len; sha256_transf(ctx, ctx->block, 1); sha256_transf(ctx, shifted_message, block_nb); rem_len = new_len % SHA256_BLOCK_SIZE; memcpy(ctx->block, &shifted_message[block_nb << 6], rem_len); ctx->len = rem_len; ctx->tot_len += (block_nb + 1) << 6; } void sha256_final(sha256_ctx *ctx, unsigned char *digest) { unsigned int block_nb; unsigned int pm_len; unsigned int len_b; int i; block_nb = (1 + ((SHA256_BLOCK_SIZE - 9) < (ctx->len % SHA256_BLOCK_SIZE))); len_b = (ctx->tot_len + ctx->len) << 3; pm_len = block_nb << 6; memset(ctx->block + ctx->len, 0, pm_len - ctx->len); ctx->block[ctx->len] = 0x80; UNPACK32(len_b, ctx->block + pm_len - 4); sha256_transf(ctx, ctx->block, block_nb); for (i = 0 ; i < 8; i++) { UNPACK32(ctx->h[i], &digest[i << 2]); } } bfgminer-bfgminer-3.10.0/sha2.h000066400000000000000000000053551226556647300162110ustar00rootroot00000000000000/* * FIPS 180-2 SHA-224/256/384/512 implementation * Last update: 02/02/2007 * Issue date: 04/30/2005 * * Copyright 2013 Con Kolivas * Copyright 2005, 2007 Olivier Gay * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "config.h" #include #include "miner.h" #ifndef SHA2_H #define SHA2_H #define SHA256_DIGEST_SIZE ( 256 / 8) #define SHA256_BLOCK_SIZE ( 512 / 8) #define SHFR(x, n) (x >> n) #define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) #define CH(x, y, z) ((x & y) ^ (~x & z)) #define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) #define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) #define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) #define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) #define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) typedef struct { unsigned int tot_len; unsigned int len; unsigned char block[2 * SHA256_BLOCK_SIZE]; uint32_t h[8]; } sha256_ctx; extern uint32_t sha256_k[64]; void sha256_init(sha256_ctx * ctx); void sha256_update(sha256_ctx *ctx, const unsigned char *message, unsigned int len); void sha256_final(sha256_ctx *ctx, unsigned char *digest); void sha256(const unsigned char *message, unsigned int len, unsigned char *digest); #endif /* !SHA2_H */ bfgminer-bfgminer-3.10.0/sha256_4way.c000066400000000000000000000511651226556647300173230ustar00rootroot00000000000000// Copyright 2012-2013 Luke Dashjr // Copyright 2010 Satoshi Nakamoto // Distributed under the MIT/X11 software license, see the accompanying // file license.txt or http://www.opensource.org/licenses/mit-license.php. // tcatm's 4-way 128-bit SSE2 SHA-256 #include "config.h" #include "driver-cpu.h" #ifdef WANT_SSE2_4WAY #include #include #include #include #include #include #define NPAR 32 static void DoubleBlockSHA256(const void* pin, void* pout, const void* pinit, unsigned int hash[8][NPAR], const void* init2); static const unsigned int sha256_consts[] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /* 0 */ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /* 8 */ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; static inline __m128i Ch(const __m128i b, const __m128i c, const __m128i d) { return _mm_xor_si128(_mm_and_si128(b,c),_mm_andnot_si128(b,d)); } static inline __m128i Maj(const __m128i b, const __m128i c, const __m128i d) { return _mm_xor_si128(_mm_xor_si128(_mm_and_si128(b,c),_mm_and_si128(b,d)),_mm_and_si128(c,d)); } static inline __m128i ROTR(__m128i x, const int n) { return _mm_or_si128(_mm_srli_epi32(x, n),_mm_slli_epi32(x, 32 - n)); } static inline __m128i SHR(__m128i x, const int n) { return _mm_srli_epi32(x, n); } /* SHA256 Functions */ #define BIGSIGMA0_256(x) (_mm_xor_si128(_mm_xor_si128(ROTR((x), 2),ROTR((x), 13)),ROTR((x), 22))) #define BIGSIGMA1_256(x) (_mm_xor_si128(_mm_xor_si128(ROTR((x), 6),ROTR((x), 11)),ROTR((x), 25))) #define SIGMA0_256(x) (_mm_xor_si128(_mm_xor_si128(ROTR((x), 7),ROTR((x), 18)), SHR((x), 3 ))) #define SIGMA1_256(x) (_mm_xor_si128(_mm_xor_si128(ROTR((x),17),ROTR((x), 19)), SHR((x), 10))) static inline unsigned int store32(const __m128i x, int i) { union 
{ unsigned int ret[4]; __m128i x; } box; box.x = x; return box.ret[i]; } static inline void store_epi32(const __m128i x, unsigned int *x0, unsigned int *x1, unsigned int *x2, unsigned int *x3) { union { unsigned int ret[4]; __m128i x; } box; box.x = x; *x0 = box.ret[3]; *x1 = box.ret[2]; *x2 = box.ret[1]; *x3 = box.ret[0]; } #define add4(x0, x1, x2, x3) _mm_add_epi32(_mm_add_epi32(x0, x1),_mm_add_epi32( x2,x3)) #define add5(x0, x1, x2, x3, x4) _mm_add_epi32(add4(x0, x1, x2, x3), x4) #define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \ T1 = add5(h, BIGSIGMA1_256(e), Ch(e, f, g), _mm_set1_epi32(sha256_consts[i]), w); \ d = _mm_add_epi32(d, T1); \ h = _mm_add_epi32(T1, _mm_add_epi32(BIGSIGMA0_256(a), Maj(a, b, c))); static inline void dumpreg(__m128i x, char *msg) { union { unsigned int ret[4]; __m128i x; } box; box.x = x ; printf("%s %08x %08x %08x %08x\n", msg, box.ret[0], box.ret[1], box.ret[2], box.ret[3]); } #if 1 #define dumpstate(i) printf("%s: %08x %08x %08x %08x %08x %08x %08x %08x %08x\n", \ __func__, store32(w0, i), store32(a, i), store32(b, i), store32(c, i), store32(d, i), store32(e, i), store32(f, i), store32(g, i), store32(h, i)); #else #define dumpstate() #endif static const unsigned int pSHA256InitState[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; bool ScanHash_4WaySSE2(struct thr_info*thr, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce) { uint32_t *hash32 = (uint32_t *)phash; unsigned int *nNonce_p = (unsigned int*)(pdata + 76); pdata += 64; for (;;) { unsigned int thash[9][NPAR] __attribute__((aligned(128))); int j; *nNonce_p = nonce; DoubleBlockSHA256(pdata, phash1, pmidstate, thash, pSHA256InitState); for (j = 0; j < NPAR; j++) { if (unlikely(thash[7][j] == 0)) { int i; for (i = 0; i < 32/4; i++) ((unsigned int*)phash)[i] = thash[i][j]; if (unlikely(hash32[7] == 0 && fulltest(phash, ptarget))) { nonce += j; *last_nonce = nonce; *nNonce_p = nonce; return true; } } } if ((nonce >= max_nonce) || thr->work_restart) { *last_nonce = nonce; return false; } nonce += NPAR; } } static void DoubleBlockSHA256(const void* pin, void* pad, const void *pre, unsigned int thash[9][NPAR], const void *init) { unsigned int* In = (unsigned int*)pin; unsigned int* Pad = (unsigned int*)pad; unsigned int* hPre = (unsigned int*)pre; unsigned int* hInit = (unsigned int*)init; unsigned int /* i, j, */ k; /* vectors used in calculation */ __m128i w0, w1, w2, w3, w4, w5, w6, w7; __m128i w8, w9, w10, w11, w12, w13, w14, w15; __m128i T1; __m128i a, b, c, d, e, f, g, h; __m128i nonce, preNonce; /* nonce offset for vector */ __m128i offset = _mm_set_epi32(0x00000003, 0x00000002, 0x00000001, 0x00000000); preNonce = _mm_add_epi32(_mm_set1_epi32(In[3]), offset); for(k = 0; k #include #include //#include #include #include #define NPAR 32 static void DoubleBlockSHA256(const void* pin, void* pout, const void* pinit, unsigned int hash[8][NPAR], const void* init2); static const unsigned int sha256_consts[] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /* 0 */ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /* 8 */ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; static inline vector unsigned int Ch(const vector unsigned int b, const vector unsigned int c, const vector unsigned int d) { return vec_sel(d,c,b); } static inline vector unsigned int Maj(const vector unsigned int b, const vector unsigned int c, const vector unsigned int d) { return vec_sel(b,c, vec_xor(b,d)); } /* RotateRight(x, n) := RotateLeft(x, 32-n) */ /* SHA256 Functions */ #define BIGSIGMA0_256(x) (vec_xor(vec_xor(vec_rl((x), (vector unsigned int)(32-2)),vec_rl((x), (vector unsigned int)(32-13))),vec_rl((x), (vector unsigned int)(32-22)))) #define BIGSIGMA1_256(x) (vec_xor(vec_xor(vec_rl((x), (vector unsigned int)(32-6)),vec_rl((x), (vector unsigned int)(32-11))),vec_rl((x), (vector unsigned int)(32-25)))) #define SIGMA0_256(x) (vec_xor(vec_xor(vec_rl((x), (vector unsigned int)(32- 7)),vec_rl((x), (vector unsigned int)(32-18))), vec_sr((x), (vector unsigned int)(3 )))) #define SIGMA1_256(x) (vec_xor(vec_xor(vec_rl((x), (vector unsigned int)(32-17)),vec_rl((x), (vector unsigned int)(32-19))), vec_sr((x), (vector unsigned int)(10)))) #define add4(x0, x1, x2, x3) vec_add(vec_add(x0, x1),vec_add( x2,x3)) #define add5(x0, x1, x2, x3, x4) vec_add(add4(x0, x1, x2, x3), x4) #define SHA256ROUND(a, b, c, d, e, f, g, h, i, w) \ T1 = add5(h, BIGSIGMA1_256(e), Ch(e, f, g), (vector unsigned int)(sha256_consts[i],sha256_consts[i],sha256_consts[i],sha256_consts[i]), w); \ d = vec_add(d, T1); \ h = vec_add(T1, vec_add(BIGSIGMA0_256(a), Maj(a, b, c))); static const unsigned int pSHA256InitState[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; bool ScanHash_altivec_4way(struct thr_info*thr, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce) { uint32_t *hash32 = (uint32_t *)phash; unsigned int *nNonce_p = (unsigned int*)(pdata + 76); pdata += 64; for (;;) { unsigned int thash[9][NPAR] __attribute__((aligned(128))); int j; *nNonce_p = nonce; DoubleBlockSHA256(pdata, phash1, pmidstate, thash, pSHA256InitState); for (j = 0; j < NPAR; j++) { if (unlikely(thash[7][j] == 0)) { int i; for (i = 0; i < 32/4; i++) ((unsigned int*)phash)[i] = thash[i][j]; if (unlikely(hash32[7] == 0 && fulltest(phash, ptarget))) { nonce += j; *last_nonce = nonce; *nNonce_p = nonce; return true; } } } if ((nonce >= max_nonce) || thr->work_restart) { *last_nonce = nonce; return false; } nonce += NPAR; } } static void DoubleBlockSHA256(const void* pin, void* pad, const void *pre, unsigned int thash[9][NPAR], const void *init) { unsigned int* In = (unsigned int*)pin; unsigned int* Pad = (unsigned int*)pad; unsigned int* hPre = (unsigned int*)pre; unsigned int* hInit = (unsigned int*)init; unsigned int /* i, j, */ k; /* vectors used in calculation */ vector unsigned int w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15; vector unsigned int T1; vector unsigned int a, b, c, d, e, f, g, h; vector unsigned int nonce, preNonce; /* nonce offset for vector */ vector unsigned int offset = (vector unsigned int)(0, 1, 2, 
3); preNonce = vec_add((vector unsigned int)(In[3],In[3],In[3],In[3]), offset); for(k = 0; k try if it´s faster to compare the results with the target inside this function */ } } #endif /* WANT_ALTIVEC_4WAY */ bfgminer-bfgminer-3.10.0/sha256_cryptopp.c000066400000000000000000000400741226556647300203140ustar00rootroot00000000000000/* * Copyright 2010-2011 Jeff Garzik * Copyright 2002-2010 Wei Dai (released as public domain) * Copyright 2012-2013 Luke Dashjr * Copyright 2011 Con Kolivas * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include "miner.h" typedef uint32_t word32; static word32 rotrFixed(word32 word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } #define blk0(i) (W[i] = data[i]) static const word32 SHA256_K[64] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; #define blk2(i) (W[i&15]+=s1(W[(i-2)&15])+W[(i-7)&15]+s0(W[(i-15)&15])) #define Ch(x,y,z) (z^(x&(y^z))) #define Maj(x,y,z) (y^((x^y)&(y^z))) #define a(i) T[(0-i)&7] #define b(i) T[(1-i)&7] #define c(i) T[(2-i)&7] #define d(i) T[(3-i)&7] #define e(i) T[(4-i)&7] #define f(i) T[(5-i)&7] #define g(i) T[(6-i)&7] #define h(i) T[(7-i)&7] #define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA256_K[i+j]+(j?blk2(i):blk0(i));\ d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) // for SHA256 #define S0(x) (rotrFixed(x,2)^rotrFixed(x,13)^rotrFixed(x,22)) #define S1(x) (rotrFixed(x,6)^rotrFixed(x,11)^rotrFixed(x,25)) #define s0(x) (rotrFixed(x,7)^rotrFixed(x,18)^(x>>3)) #define s1(x) (rotrFixed(x,17)^rotrFixed(x,19)^(x>>10)) static void SHA256_Transform(word32 *state, const word32 *data) { word32 W[16] = { }; word32 T[8]; unsigned int j; /* Copy context->state[] to working vars */ memcpy(T, state, sizeof(T)); /* 64 operations, partially loop unrolled */ for (j=0; j<64; j+=16) { R( 0); R( 1); R( 2); R( 3); R( 4); R( 5); R( 6); R( 7); R( 8); R( 9); R(10); R(11); R(12); R(13); R(14); R(15); } /* Add the working vars back into context.state[] */ state[0] += a(0); state[1] += b(0); state[2] += c(0); state[3] += d(0); state[4] += e(0); state[5] += f(0); state[6] += g(0); state[7] += h(0); } static void runhash(void *state, const void *input, const void *init) { memcpy(state, init, 32); SHA256_Transform(state, input); } /* suspiciously similar to ScanHash* from bitcoin */ bool scanhash_cryptopp(struct thr_info*thr, const unsigned char *midstate, unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n) { uint32_t *hash32 = (uint32_t *) 
hash; uint32_t *nonce = (uint32_t *)(data + 76); data += 64; // Midstate and data are stored in little endian LOCAL_swap32le(unsigned char, midstate, 32/4) LOCAL_swap32le(unsigned char, data, 64/4) uint32_t *nonce_w = (uint32_t *)(data + 12); while (1) { *nonce_w = n; runhash(hash1, data, midstate); runhash(hash, hash1, sha256_init_state); if (unlikely((hash32[7] == 0) && fulltest(hash, target))) { *nonce = htole32(n); *last_nonce = n; return true; } if ((n >= max_nonce) || thr->work_restart) { *nonce = htole32(n); *last_nonce = n; return false; } n++; } } #if defined(WANT_CRYPTOPP_ASM32) #define CRYPTOPP_FASTCALL #define CRYPTOPP_BOOL_X86 1 #define CRYPTOPP_BOOL_X64 0 #define CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE 0 #ifdef CRYPTOPP_GENERATE_X64_MASM #define AS1(x) x*newline* #define AS2(x, y) x, y*newline* #define AS3(x, y, z) x, y, z*newline* #define ASS(x, y, a, b, c, d) x, y, a*64+b*16+c*4+d*newline* #define ASL(x) label##x:*newline* #define ASJ(x, y, z) x label##y*newline* #define ASC(x, y) x label##y*newline* #define AS_HEX(y) 0##y##h #elif defined(_MSC_VER) || defined(__BORLANDC__) #define CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY #define AS1(x) __asm {x} #define AS2(x, y) __asm {x, y} #define AS3(x, y, z) __asm {x, y, z} #define ASS(x, y, a, b, c, d) __asm {x, y, (a)*64+(b)*16+(c)*4+(d)} #define ASL(x) __asm {label##x:} #define ASJ(x, y, z) __asm {x label##y} #define ASC(x, y) __asm {x label##y} #define CRYPTOPP_NAKED __declspec(naked) #define AS_HEX(y) 0x##y #else #define CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY // define these in two steps to allow arguments to be expanded #define GNU_AS1(x) #x ";" #define GNU_AS2(x, y) #x ", " #y ";" #define GNU_AS3(x, y, z) #x ", " #y ", " #z ";" #define GNU_ASL(x) "\n" #x ":" #define GNU_ASJ(x, y, z) #x " " #y #z ";" #define AS1(x) GNU_AS1(x) #define AS2(x, y) GNU_AS2(x, y) #define AS3(x, y, z) GNU_AS3(x, y, z) #define ASS(x, y, a, b, c, d) #x ", " #y ", " #a "*64+" #b "*16+" #c "*4+" #d ";" #define ASL(x) GNU_ASL(x) #define ASJ(x, y, z) GNU_ASJ(x, y, z) #define ASC(x, y) #x " " #y ";" #define CRYPTOPP_NAKED #define AS_HEX(y) 0x##y #endif #define IF0(y) #define IF1(y) y #ifdef CRYPTOPP_GENERATE_X64_MASM #define ASM_MOD(x, y) ((x) MOD (y)) #define XMMWORD_PTR XMMWORD PTR #else // GNU assembler doesn't seem to have mod operator #define ASM_MOD(x, y) ((x)-((x)/(y))*(y)) // GAS 2.15 doesn't support XMMWORD PTR. 
it seems necessary only for MASM #define XMMWORD_PTR #endif #if CRYPTOPP_BOOL_X86 #define AS_REG_1 ecx #define AS_REG_2 edx #define AS_REG_3 esi #define AS_REG_4 edi #define AS_REG_5 eax #define AS_REG_6 ebx #define AS_REG_7 ebp #define AS_REG_1d ecx #define AS_REG_2d edx #define AS_REG_3d esi #define AS_REG_4d edi #define AS_REG_5d eax #define AS_REG_6d ebx #define AS_REG_7d ebp #define WORD_SZ 4 #define WORD_REG(x) e##x #define WORD_PTR DWORD PTR #define AS_PUSH_IF86(x) AS1(push e##x) #define AS_POP_IF86(x) AS1(pop e##x) #define AS_JCXZ jecxz #elif CRYPTOPP_BOOL_X64 #ifdef CRYPTOPP_GENERATE_X64_MASM #define AS_REG_1 rcx #define AS_REG_2 rdx #define AS_REG_3 r8 #define AS_REG_4 r9 #define AS_REG_5 rax #define AS_REG_6 r10 #define AS_REG_7 r11 #define AS_REG_1d ecx #define AS_REG_2d edx #define AS_REG_3d r8d #define AS_REG_4d r9d #define AS_REG_5d eax #define AS_REG_6d r10d #define AS_REG_7d r11d #else #define AS_REG_1 rdi #define AS_REG_2 rsi #define AS_REG_3 rdx #define AS_REG_4 rcx #define AS_REG_5 r8 #define AS_REG_6 r9 #define AS_REG_7 r10 #define AS_REG_1d edi #define AS_REG_2d esi #define AS_REG_3d edx #define AS_REG_4d ecx #define AS_REG_5d r8d #define AS_REG_6d r9d #define AS_REG_7d r10d #endif #define WORD_SZ 8 #define WORD_REG(x) r##x #define WORD_PTR QWORD PTR #define AS_PUSH_IF86(x) #define AS_POP_IF86(x) #define AS_JCXZ jrcxz #endif static void CRYPTOPP_FASTCALL X86_SHA256_HashBlocks(word32 *state, const word32 *data, size_t len #if defined(_MSC_VER) && (_MSC_VER == 1200) , ... // VC60 workaround: prevent VC 6 from inlining this function #endif ) { #if defined(_MSC_VER) && (_MSC_VER == 1200) AS2(mov ecx, [state]) AS2(mov edx, [data]) #endif #define LOCALS_SIZE 8*4 + 16*4 + 4*WORD_SZ #define H(i) [BASE+ASM_MOD(1024+7-(i),8)*4] #define G(i) H(i+1) #define F(i) H(i+2) #define E(i) H(i+3) #define D(i) H(i+4) #define C(i) H(i+5) #define B(i) H(i+6) #define A(i) H(i+7) #define Wt(i) BASE+8*4+ASM_MOD(1024+15-(i),16)*4 #define Wt_2(i) Wt((i)-2) #define Wt_15(i) Wt((i)-15) #define Wt_7(i) Wt((i)-7) #define K_END [BASE+8*4+16*4+0*WORD_SZ] #define STATE_SAVE [BASE+8*4+16*4+1*WORD_SZ] #define DATA_SAVE [BASE+8*4+16*4+2*WORD_SZ] #define DATA_END [BASE+8*4+16*4+3*WORD_SZ] #define Kt(i) WORD_REG(si)+(i)*4 #if CRYPTOPP_BOOL_X86 #define BASE esp+4 #elif defined(__GNUC__) #define BASE r8 #else #define BASE rsp #endif #define RA0(i, edx, edi) \ AS2( add edx, [Kt(i)] )\ AS2( add edx, [Wt(i)] )\ AS2( add edx, H(i) )\ #define RA1(i, edx, edi) #define RB0(i, edx, edi) #define RB1(i, edx, edi) \ AS2( mov AS_REG_7d, [Wt_2(i)] )\ AS2( mov edi, [Wt_15(i)])\ AS2( mov ebx, AS_REG_7d )\ AS2( shr AS_REG_7d, 10 )\ AS2( ror ebx, 17 )\ AS2( xor AS_REG_7d, ebx )\ AS2( ror ebx, 2 )\ AS2( xor ebx, AS_REG_7d )/* s1(W_t-2) */\ AS2( add ebx, [Wt_7(i)])\ AS2( mov AS_REG_7d, edi )\ AS2( shr AS_REG_7d, 3 )\ AS2( ror edi, 7 )\ AS2( add ebx, [Wt(i)])/* s1(W_t-2) + W_t-7 + W_t-16 */\ AS2( xor AS_REG_7d, edi )\ AS2( add edx, [Kt(i)])\ AS2( ror edi, 11 )\ AS2( add edx, H(i) )\ AS2( xor AS_REG_7d, edi )/* s0(W_t-15) */\ AS2( add AS_REG_7d, ebx )/* W_t = s1(W_t-2) + W_t-7 + s0(W_t-15) W_t-16*/\ AS2( mov [Wt(i)], AS_REG_7d)\ AS2( add edx, AS_REG_7d )\ #define ROUND(i, r, eax, ecx, edi, edx)\ /* in: edi = E */\ /* unused: eax, ecx, temp: ebx, AS_REG_7d, out: edx = T1 */\ AS2( mov edx, F(i) )\ AS2( xor edx, G(i) )\ AS2( and edx, edi )\ AS2( xor edx, G(i) )/* Ch(E,F,G) = (G^(E&(F^G))) */\ AS2( mov AS_REG_7d, edi )\ AS2( ror edi, 6 )\ AS2( ror AS_REG_7d, 25 )\ RA##r(i, edx, edi )/* H + Wt + Kt + Ch(E,F,G) */\ AS2( xor AS_REG_7d, 
edi )\ AS2( ror edi, 5 )\ AS2( xor AS_REG_7d, edi )/* S1(E) */\ AS2( add edx, AS_REG_7d )/* T1 = S1(E) + Ch(E,F,G) + H + Wt + Kt */\ RB##r(i, edx, edi )/* H + Wt + Kt + Ch(E,F,G) */\ /* in: ecx = A, eax = B^C, edx = T1 */\ /* unused: edx, temp: ebx, AS_REG_7d, out: eax = A, ecx = B^C, edx = E */\ AS2( mov ebx, ecx )\ AS2( xor ecx, B(i) )/* A^B */\ AS2( and eax, ecx )\ AS2( xor eax, B(i) )/* Maj(A,B,C) = B^((A^B)&(B^C) */\ AS2( mov AS_REG_7d, ebx )\ AS2( ror ebx, 2 )\ AS2( add eax, edx )/* T1 + Maj(A,B,C) */\ AS2( add edx, D(i) )\ AS2( mov D(i), edx )\ AS2( ror AS_REG_7d, 22 )\ AS2( xor AS_REG_7d, ebx )\ AS2( ror ebx, 11 )\ AS2( xor AS_REG_7d, ebx )\ AS2( add eax, AS_REG_7d )/* T1 + S0(A) + Maj(A,B,C) */\ AS2( mov H(i), eax )\ #define SWAP_COPY(i) \ AS2( mov WORD_REG(bx), [WORD_REG(dx)+i*WORD_SZ])\ AS1( bswap WORD_REG(bx))\ AS2( mov [Wt(i*(1+CRYPTOPP_BOOL_X64)+CRYPTOPP_BOOL_X64)], WORD_REG(bx)) #if defined(__GNUC__) #if CRYPTOPP_BOOL_X64 FixedSizeAlignedSecBlock workspace; #endif __asm__ __volatile__ ( #if CRYPTOPP_BOOL_X64 "lea %4, %%r8;" #endif ".intel_syntax noprefix;" #elif defined(CRYPTOPP_GENERATE_X64_MASM) ALIGN 8 X86_SHA256_HashBlocks PROC FRAME rex_push_reg rsi push_reg rdi push_reg rbx push_reg rbp alloc_stack(LOCALS_SIZE+8) .endprolog mov rdi, r8 lea rsi, [?SHA256_K@CryptoPP@@3QBIB + 48*4] #endif #if CRYPTOPP_BOOL_X86 #ifndef __GNUC__ AS2( mov edi, [len]) AS2( lea WORD_REG(si), [SHA256_K+48*4]) #endif #if !defined(_MSC_VER) || (_MSC_VER < 1400) AS_PUSH_IF86(bx) #endif AS_PUSH_IF86(bp) AS2( mov ebx, esp) AS2( and esp, -16) AS2( sub WORD_REG(sp), LOCALS_SIZE) AS_PUSH_IF86(bx) #endif AS2( mov STATE_SAVE, WORD_REG(cx)) AS2( mov DATA_SAVE, WORD_REG(dx)) AS2( lea WORD_REG(ax), [WORD_REG(di) + WORD_REG(dx)]) AS2( mov DATA_END, WORD_REG(ax)) AS2( mov K_END, WORD_REG(si)) #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE #if CRYPTOPP_BOOL_X86 AS2( test edi, 1) ASJ( jnz, 2, f) AS1( dec DWORD PTR K_END) #endif AS2( movdqa xmm0, XMMWORD_PTR [WORD_REG(cx)+0*16]) AS2( movdqa xmm1, XMMWORD_PTR [WORD_REG(cx)+1*16]) #endif #if CRYPTOPP_BOOL_X86 #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE ASJ( jmp, 0, f) #endif ASL(2) // non-SSE2 AS2( mov esi, ecx) AS2( lea edi, A(0)) AS2( mov ecx, 8) AS1( rep movsd) AS2( mov esi, K_END) ASJ( jmp, 3, f) #endif #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE ASL(0) AS2( movdqa E(0), xmm1) AS2( movdqa A(0), xmm0) #endif #if CRYPTOPP_BOOL_X86 ASL(3) #endif AS2( sub WORD_REG(si), 48*4) SWAP_COPY(0) SWAP_COPY(1) SWAP_COPY(2) SWAP_COPY(3) SWAP_COPY(4) SWAP_COPY(5) SWAP_COPY(6) SWAP_COPY(7) #if CRYPTOPP_BOOL_X86 SWAP_COPY(8) SWAP_COPY(9) SWAP_COPY(10) SWAP_COPY(11) SWAP_COPY(12) SWAP_COPY(13) SWAP_COPY(14) SWAP_COPY(15) #endif AS2( mov edi, E(0)) // E AS2( mov eax, B(0)) // B AS2( xor eax, C(0)) // B^C AS2( mov ecx, A(0)) // A ROUND(0, 0, eax, ecx, edi, edx) ROUND(1, 0, ecx, eax, edx, edi) ROUND(2, 0, eax, ecx, edi, edx) ROUND(3, 0, ecx, eax, edx, edi) ROUND(4, 0, eax, ecx, edi, edx) ROUND(5, 0, ecx, eax, edx, edi) ROUND(6, 0, eax, ecx, edi, edx) ROUND(7, 0, ecx, eax, edx, edi) ROUND(8, 0, eax, ecx, edi, edx) ROUND(9, 0, ecx, eax, edx, edi) ROUND(10, 0, eax, ecx, edi, edx) ROUND(11, 0, ecx, eax, edx, edi) ROUND(12, 0, eax, ecx, edi, edx) ROUND(13, 0, ecx, eax, edx, edi) ROUND(14, 0, eax, ecx, edi, edx) ROUND(15, 0, ecx, eax, edx, edi) ASL(1) AS2(add WORD_REG(si), 4*16) ROUND(0, 1, eax, ecx, edi, edx) ROUND(1, 1, ecx, eax, edx, edi) ROUND(2, 1, eax, ecx, edi, edx) ROUND(3, 1, ecx, eax, edx, edi) ROUND(4, 1, eax, ecx, edi, edx) ROUND(5, 1, ecx, eax, edx, edi) ROUND(6, 1, eax, ecx, edi, edx) ROUND(7, 1, 
ecx, eax, edx, edi) ROUND(8, 1, eax, ecx, edi, edx) ROUND(9, 1, ecx, eax, edx, edi) ROUND(10, 1, eax, ecx, edi, edx) ROUND(11, 1, ecx, eax, edx, edi) ROUND(12, 1, eax, ecx, edi, edx) ROUND(13, 1, ecx, eax, edx, edi) ROUND(14, 1, eax, ecx, edi, edx) ROUND(15, 1, ecx, eax, edx, edi) AS2( cmp WORD_REG(si), K_END) ASJ( jb, 1, b) AS2( mov WORD_REG(dx), DATA_SAVE) AS2( add WORD_REG(dx), 64) AS2( mov AS_REG_7, STATE_SAVE) AS2( mov DATA_SAVE, WORD_REG(dx)) #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE #if CRYPTOPP_BOOL_X86 AS2( test DWORD PTR K_END, 1) ASJ( jz, 4, f) #endif AS2( movdqa xmm1, XMMWORD_PTR [AS_REG_7+1*16]) AS2( movdqa xmm0, XMMWORD_PTR [AS_REG_7+0*16]) AS2( paddd xmm1, E(0)) AS2( paddd xmm0, A(0)) AS2( movdqa [AS_REG_7+1*16], xmm1) AS2( movdqa [AS_REG_7+0*16], xmm0) AS2( cmp WORD_REG(dx), DATA_END) ASJ( jb, 0, b) #endif #if CRYPTOPP_BOOL_X86 #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE ASJ( jmp, 5, f) ASL(4) // non-SSE2 #endif AS2( add [AS_REG_7+0*4], ecx) // A AS2( add [AS_REG_7+4*4], edi) // E AS2( mov eax, B(0)) AS2( mov ebx, C(0)) AS2( mov ecx, D(0)) AS2( add [AS_REG_7+1*4], eax) AS2( add [AS_REG_7+2*4], ebx) AS2( add [AS_REG_7+3*4], ecx) AS2( mov eax, F(0)) AS2( mov ebx, G(0)) AS2( mov ecx, H(0)) AS2( add [AS_REG_7+5*4], eax) AS2( add [AS_REG_7+6*4], ebx) AS2( add [AS_REG_7+7*4], ecx) AS2( mov ecx, AS_REG_7d) AS2( cmp WORD_REG(dx), DATA_END) ASJ( jb, 2, b) #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE ASL(5) #endif #endif AS_POP_IF86(sp) AS_POP_IF86(bp) #if !defined(_MSC_VER) || (_MSC_VER < 1400) AS_POP_IF86(bx) #endif #ifdef CRYPTOPP_GENERATE_X64_MASM add rsp, LOCALS_SIZE+8 pop rbp pop rbx pop rdi pop rsi ret X86_SHA256_HashBlocks ENDP #endif #ifdef __GNUC__ ".att_syntax prefix;" : : "c" (state), "d" (data), "S" (SHA256_K+48), "D" (len) #if CRYPTOPP_BOOL_X64 , "m" (workspace[0]) #endif : "memory", "cc", "%eax" #if CRYPTOPP_BOOL_X64 , "%rbx", "%r8", "%r10" #endif ); #endif } static inline bool HasSSE2(void) { return false; } static void SHA256_Transform32(word32 *state, const word32 *data) { word32 W[16]; swap32yes(W, data, 16); X86_SHA256_HashBlocks(state, W, 16 * 4); } static void runhash32(void *state, const void *input, const void *init) { memcpy(state, init, 32); SHA256_Transform32(state, input); } /* suspiciously similar to ScanHash* from bitcoin */ bool scanhash_asm32(struct thr_info*thr, const unsigned char *midstate, unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n) { uint32_t *hash32 = (uint32_t *) hash; uint32_t *nonce = (uint32_t *)(data + 76); data += 64; while (1) { *nonce = n; runhash32(hash1, data, midstate); runhash32(hash, hash1, sha256_init_state); if (unlikely((hash32[7] == 0) && fulltest(hash, target))) { *last_nonce = n; return true; } if ((n >= max_nonce) || thr->work_restart) { *last_nonce = n; return false; } ++n; } } #endif // #if defined(WANT_CRYPTOPP_ASM32) bfgminer-bfgminer-3.10.0/sha256_generic.c000066400000000000000000000233371226556647300200530ustar00rootroot00000000000000/* * Cryptographic API. * * SHA-256, as specified in * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf * * SHA-256 code by Jean-Luc Cooke . 
* * Copyright (c) Jean-Luc Cooke * Copyright (c) Andrew McDonald * Copyright (c) 2002 James Morris * SHA224 Support Copyright 2007 Intel Corporation * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include "config.h" #include #include #include #include #include "miner.h" typedef uint32_t u32; typedef uint8_t u8; static inline u32 ror32(u32 word, unsigned int shift) { return (word >> shift) | (word << (32 - shift)); } static inline u32 Ch(u32 x, u32 y, u32 z) { return z ^ (x & (y ^ z)); } static inline u32 Maj(u32 x, u32 y, u32 z) { return (x & y) | (z & (x | y)); } #define e0(x) (ror32(x, 2) ^ ror32(x,13) ^ ror32(x,22)) #define e1(x) (ror32(x, 6) ^ ror32(x,11) ^ ror32(x,25)) #define s0(x) (ror32(x, 7) ^ ror32(x,18) ^ (x >> 3)) #define s1(x) (ror32(x,17) ^ ror32(x,19) ^ (x >> 10)) static inline void LOAD_OP(int I, u32 *W, const u8 *input) { /* byteswap is handled once in scanhash_c */ W[I] = /* ntohl */ ( ((u32*)(input))[I] ); } static inline void BLEND_OP(int I, u32 *W) { W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; } static void sha256_transform(u32 *state, const u8 *input) { u32 a, b, c, d, e, f, g, h, t1, t2; u32 W[64]; int i; /* load the input */ for (i = 0; i < 16; i++) LOAD_OP(i, W, input); /* now blend */ for (i = 16; i < 64; i++) BLEND_OP(i, W); /* load the state into our registers */ a=state[0]; b=state[1]; c=state[2]; d=state[3]; e=state[4]; f=state[5]; g=state[6]; h=state[7]; /* now iterate */ t1 = h + e1(e) + Ch(e,f,g) + 0x428a2f98 + W[ 0]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x71374491 + W[ 1]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xb5c0fbcf + W[ 2]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xe9b5dba5 + W[ 3]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x3956c25b + W[ 4]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x59f111f1 + W[ 5]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x923f82a4 + W[ 6]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xab1c5ed5 + W[ 7]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xd807aa98 + W[ 8]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x12835b01 + W[ 9]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x243185be + W[10]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x550c7dc3 + W[11]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x72be5d74 + W[12]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x80deb1fe + W[13]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x9bdc06a7 + W[14]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xc19bf174 + W[15]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xe49b69c1 + W[16]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xefbe4786 + W[17]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x0fc19dc6 + W[18]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x240ca1cc + W[19]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x2de92c6f + W[20]; t2 = e0(e) + Maj(e,f,g); 
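/* Every unrolled round in this transform follows the same FIPS 180-2
 * recurrence: t1 = h + e1(e) + Ch(e,f,g) + K[i] + W[i] and
 * t2 = e0(a) + Maj(a,b,c), followed by d += t1 and h = t1 + t2.
 * Instead of rotating the eight working variables at run time, each
 * successive statement simply renames which variable plays which role. */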
h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x4a7484aa + W[21]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x5cb0a9dc + W[22]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x76f988da + W[23]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x983e5152 + W[24]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xa831c66d + W[25]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xb00327c8 + W[26]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xbf597fc7 + W[27]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0xc6e00bf3 + W[28]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xd5a79147 + W[29]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x06ca6351 + W[30]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x14292967 + W[31]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x27b70a85 + W[32]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x2e1b2138 + W[33]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x4d2c6dfc + W[34]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x53380d13 + W[35]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x650a7354 + W[36]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x766a0abb + W[37]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x81c2c92e + W[38]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x92722c85 + W[39]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0xa2bfe8a1 + W[40]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0xa81a664b + W[41]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0xc24b8b70 + W[42]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0xc76c51a3 + W[43]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0xd192e819 + W[44]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xd6990624 + W[45]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0xf40e3585 + W[46]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x106aa070 + W[47]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x19a4c116 + W[48]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x1e376c08 + W[49]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x2748774c + W[50]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x34b0bcb5 + W[51]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 0x391c0cb3 + W[52]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0x4ed8aa4a + W[53]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0x5b9cca4f + W[54]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0x682e6ff3 + W[55]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; t1 = h + e1(e) + Ch(e,f,g) + 0x748f82ee + W[56]; t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2; t1 = g + e1(d) + Ch(d,e,f) + 0x78a5636f + W[57]; t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2; t1 = f + e1(c) + Ch(c,d,e) + 0x84c87814 + W[58]; t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2; t1 = e + e1(b) + Ch(b,c,d) + 0x8cc70208 + W[59]; t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2; t1 = d + e1(a) + Ch(a,b,c) + 
0x90befffa + W[60]; t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2; t1 = c + e1(h) + Ch(h,a,b) + 0xa4506ceb + W[61]; t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2; t1 = b + e1(g) + Ch(g,h,a) + 0xbef9a3f7 + W[62]; t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2; t1 = a + e1(f) + Ch(f,g,h) + 0xc67178f2 + W[63]; t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2; state[0] += a; state[1] += b; state[2] += c; state[3] += d; state[4] += e; state[5] += f; state[6] += g; state[7] += h; #if 0 /* clear any sensitive info... */ a = b = c = d = e = f = g = h = t1 = t2 = 0; memset(W, 0, 64 * sizeof(u32)); #endif } static void runhash(void *state, const void *input, const void *init) { memcpy(state, init, 32); sha256_transform(state, input); } const uint32_t sha256_init_state[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 }; /* suspiciously similar to ScanHash* from bitcoin */ bool scanhash_c(struct thr_info*thr, const unsigned char *midstate, unsigned char *data, unsigned char *hash1, unsigned char *hash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n) { uint32_t *hash32 = (uint32_t *) hash; uint32_t *nonce = (uint32_t *)(data + 76); unsigned long stat_ctr = 0; data += 64; // Midstate and data are stored in little endian LOCAL_swap32le(unsigned char, midstate, 32/4) LOCAL_swap32le(unsigned char, data, 64/4) uint32_t *nonce_w = (uint32_t *)(data + 12); while (1) { *nonce_w = n; // runhash expects int32 data preprocessed into native endian runhash(hash1, data, midstate); runhash(hash, hash1, sha256_init_state); stat_ctr++; if (unlikely((hash32[7] == 0) && fulltest(hash, target))) { *nonce = htole32(n); *last_nonce = n; return true; } if ((n >= max_nonce) || thr->work_restart) { *nonce = htole32(n); *last_nonce = n; return false; } n++; } } bfgminer-bfgminer-3.10.0/sha256_sse2_amd64.c000066400000000000000000000077221226556647300203060ustar00rootroot00000000000000/* * SHA-256 driver for ASM routine for x86_64 on Linux * Copyright (c) Mark Crichton * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. 
* */ #include "config.h" #include "driver-cpu.h" #ifdef WANT_X8664_SSE2 #include #include #include #include #include #include extern void sha256_sse2_64_new (__m128i *res, __m128i *res1, __m128i *data, const uint32_t init[8])__asm__("sha256_sse2_64_new"); static uint32_t g_sha256_k[]__attribute__((aligned(0x100))) = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /* 0 */ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /* 8 */ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; const uint32_t sha256_init_sse2[8]__asm__("sha256_init_sse2")__attribute__((aligned(0x100))) = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; __m128i g_4sha256_k[64]; __m128i sha256_consts_m128i[64]__asm__("sha256_consts_m128i")__attribute__((aligned(0x1000))); bool scanhash_sse2_64(struct thr_info*thr, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce) { uint32_t *hash32 = (uint32_t *)phash; uint32_t *nNonce_p = (uint32_t *)(pdata + 76); uint32_t m_midstate[8], m_w[16], m_w1[16]; __m128i m_4w[64] __attribute__ ((aligned (0x100))); __m128i m_4hash[64] __attribute__ ((aligned (0x100))); __m128i m_4hash1[64] __attribute__ ((aligned (0x100))); __m128i offset; int i; pdata += 64; /* For debugging */ union { __m128i m; uint32_t i[4]; } mi; /* Message expansion */ memcpy(m_midstate, pmidstate, sizeof(m_midstate)); memcpy(m_w, pdata, sizeof(m_w)); /* The 2nd half of the data */ memcpy(m_w1, phash1, sizeof(m_w1)); memset(m_4hash, 0, sizeof(m_4hash)); /* Transmongrify */ for (i = 0; i < 16; i++) m_4w[i] = _mm_set1_epi32(m_w[i]); for (i = 0; i < 16; i++) m_4hash1[i] = _mm_set1_epi32(m_w1[i]); for (i = 0; i < 64; i++) sha256_consts_m128i[i] = _mm_set1_epi32(g_sha256_k[i]); offset = _mm_set_epi32(0x3, 0x2, 0x1, 0x0); for (;;) { int j; m_4w[3] = _mm_add_epi32(offset, _mm_set1_epi32(nonce)); sha256_sse2_64_new (m_4hash, m_4hash1, m_4w, m_midstate); for (j = 0; j < 4; j++) { mi.m = m_4hash[7]; if (unlikely(mi.i[j] == 0)) break; } /* If j = true, we found a hit...so check it */ /* Use the C version for a check... 
*/ if (unlikely(j != 4)) { for (i = 0; i < 8; i++) { mi.m = m_4hash[i]; *(uint32_t *)&(phash)[i*4] = mi.i[j]; } if (unlikely(hash32[7] == 0 && fulltest(phash, ptarget))) { nonce += j; *last_nonce = nonce + 1; *nNonce_p = nonce; return true; } } if (unlikely((nonce >= max_nonce) || thr->work_restart)) { *last_nonce = nonce; return false; } nonce += 4; } } #endif /* WANT_X8664_SSE2 */ bfgminer-bfgminer-3.10.0/sha256_sse2_i386.c000066400000000000000000000076121226556647300200620ustar00rootroot00000000000000/* * SHA-256 driver for ASM routine for x86_64 on Linux * Copyright (c) Mark Crichton * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include "config.h" #include "driver-cpu.h" #ifdef WANT_X8632_SSE2 #include #include #include #include #include #include extern void CalcSha256_x86 (__m128i *res, __m128i *data, const uint32_t init[8])__asm__("CalcSha256_x86")__attribute__((fastcall)); static uint32_t g_sha256_k[]__attribute__((aligned(0x100))) = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /* 0 */ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /* 8 */ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; const uint32_t sha256_32init[8]__attribute__((aligned(0x100))) = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; __m128i g_4sha256_k[64]; __m128i sha256_consts_m128i[64]__asm__("sha256_consts_m128i")__attribute__((aligned(0x1000))); bool scanhash_sse2_32(struct thr_info*thr, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce) { uint32_t *hash32 = (uint32_t *)phash; uint32_t *nNonce_p = (uint32_t *)(pdata + 76); uint32_t m_midstate[8], m_w[16], m_w1[16]; __m128i m_4w[64] __attribute__ ((aligned (0x100))); __m128i m_4hash[64] __attribute__ ((aligned (0x100))); __m128i m_4hash1[64] __attribute__ ((aligned (0x100))); __m128i offset; int i; pdata += 64; /* Message expansion */ memcpy(m_midstate, pmidstate, sizeof(m_midstate)); memcpy(m_w, pdata, sizeof(m_w)); /* The 2nd half of the data */ memcpy(m_w1, phash1, sizeof(m_w1)); memset(m_4hash, 0, sizeof(m_4hash)); /* Transmongrify */ for (i = 0; i < 16; i++) m_4w[i] = _mm_set1_epi32(m_w[i]); for (i = 0; i < 16; i++) m_4hash1[i] = _mm_set1_epi32(m_w1[i]); for (i = 0; i < 64; i++) sha256_consts_m128i[i] = _mm_set1_epi32(g_sha256_k[i]); offset = _mm_set_epi32(0x3, 0x2, 0x1, 0x0); for (;;) { int j; m_4w[3] = _mm_add_epi32(offset, _mm_set1_epi32(nonce)); /* Some optimization can be done here W.R.T. 
precalculating some hash */ CalcSha256_x86 (m_4hash1, m_4w, m_midstate); CalcSha256_x86 (m_4hash, m_4hash1, sha256_32init); for (j = 0; j < 4; j++) { if (unlikely(((uint32_t *)&(m_4hash[7]))[j] == 0)) { /* We found a hit...so check it */ /* Use the C version for a check... */ for (i = 0; i < 8; i++) { *(uint32_t *)&(phash)[i<<2] = ((uint32_t *)&(m_4hash[i]))[j]; } if (unlikely(hash32[7] == 0 && fulltest(phash, ptarget))) { nonce += j; *last_nonce = nonce; *nNonce_p = nonce; return true; } } } if (unlikely((nonce >= max_nonce) || thr->work_restart)) { *last_nonce = nonce; return false; } nonce += 4; } } #endif /* WANT_X8632_SSE2 */ bfgminer-bfgminer-3.10.0/sha256_sse4_amd64.c000066400000000000000000000074021226556647300203030ustar00rootroot00000000000000/* * SHA-256 driver for ASM routine for x86_64 on Linux * Copyright (c) Mark Crichton * Copyright 2012-2013 Luke Dashjr * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include "config.h" #include "driver-cpu.h" #ifdef WANT_X8664_SSE4 #include #include #include #include #include #include extern void CalcSha256_x64_sse4(__m128i *res, __m128i *data, uint32_t init[8])__asm__("CalcSha256_x64_sse4"); static uint32_t g_sha256_k[] = { 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, /* 0 */ 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, /* 8 */ 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, /* 16 */ 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, /* 24 */ 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, /* 32 */ 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, /* 40 */ 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, /* 48 */ 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, /* 56 */ 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; static uint32_t g_sha256_hinit[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; __m128i g_4sha256_k[64]__asm__("g_4sha256_k"); bool scanhash_sse4_64(struct thr_info*thr, const unsigned char *pmidstate, unsigned char *pdata, unsigned char *phash1, unsigned char *phash, const unsigned char *ptarget, uint32_t max_nonce, uint32_t *last_nonce, uint32_t nonce) { uint32_t *hash32 = (uint32_t *)phash; uint32_t *nNonce_p = (uint32_t *)(pdata + 76); uint32_t m_midstate[8], m_w[16], m_w1[16]; __m128i m_4w[64], m_4hash[64], m_4hash1[64]; __m128i offset; int i; pdata += 64; /* For debugging */ union { __m128i m; uint32_t i[4]; } mi; /* Message expansion */ memcpy(m_midstate, pmidstate, sizeof(m_midstate)); memcpy(m_w, pdata, sizeof(m_w)); /* The 2nd half of the data */ memcpy(m_w1, phash1, sizeof(m_w1)); memset(m_4hash, 0, sizeof(m_4hash)); /* Transmongrify */ for (i = 0; i < 16; i++) m_4w[i] = _mm_set1_epi32(m_w[i]); for (i = 0; i < 16; i++) m_4hash1[i] = _mm_set1_epi32(m_w1[i]); for (i = 0; i < 64; i++) g_4sha256_k[i] = _mm_set1_epi32(g_sha256_k[i]); offset = _mm_set_epi32(0x3, 0x2, 0x1, 0x0); for (;;) { int j; m_4w[3] = _mm_add_epi32(offset, _mm_set1_epi32(nonce)); /* Some optimization can be done here W.R.T. 
precalculating some hash */ CalcSha256_x64_sse4(m_4hash1, m_4w, m_midstate); CalcSha256_x64_sse4(m_4hash, m_4hash1, g_sha256_hinit); for (j = 0; j < 4; j++) { mi.m = m_4hash[7]; if (unlikely(mi.i[j] == 0)) break; } /* If j = true, we found a hit...so check it */ /* Use the C version for a check... */ if (unlikely(j != 4)) { for (i = 0; i < 8; i++) { mi.m = m_4hash[i]; *(uint32_t *)&(phash)[i*4] = mi.i[j]; } if (unlikely(hash32[7] == 0 && fulltest(phash, ptarget))) { nonce += j; *last_nonce = nonce; *nNonce_p = nonce; return true; } } if (unlikely((nonce >= max_nonce) || thr->work_restart)) { *last_nonce = nonce; return false; } nonce += 4; } } #endif /* WANT_X8664_SSE4 */ bfgminer-bfgminer-3.10.0/sha256_via.c000066400000000000000000000045671226556647300172220ustar00rootroot00000000000000/* * Copyright 2010-2011 Jeff Garzik * Copyright 2012-2013 Luke Dashjr * Copyright 2011 Con Kolivas * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include "driver-cpu.h" #include #include #include #include #include #include #include "miner.h" #ifdef WANT_VIA_PADLOCK static void via_sha256(void *hash, void *buf, unsigned len) { unsigned stat = 0; asm volatile(".byte 0xf3, 0x0f, 0xa6, 0xd0" :"+S"(buf), "+a"(stat) :"c"(len), "D" (hash) :"memory"); } bool scanhash_via(struct thr_info*thr, const unsigned char __maybe_unused *pmidstate, unsigned char *data_inout, unsigned char __maybe_unused *phash1, unsigned char __maybe_unused *phash, const unsigned char *target, uint32_t max_nonce, uint32_t *last_nonce, uint32_t n) { unsigned char data[128] __attribute__((aligned(128))); unsigned char tmp_hash[32] __attribute__((aligned(128))); unsigned char tmp_hash1[32] __attribute__((aligned(128))); uint32_t *data32 = (uint32_t *) data; uint32_t *hash32 = (uint32_t *) tmp_hash; uint32_t *nonce = (uint32_t *)(data + 64 + 12); uint32_t *nonce_inout = (uint32_t *)(data_inout + 64 + 12); unsigned long stat_ctr = 0; /* bitcoin gives us big endian input, but via wants LE, * so we reverse the swapping bitcoin has already done (extra work) * in order to permit the hardware to swap everything * back to BE again (extra work). */ swap32yes(data32, data_inout, 128/4); while (1) { *nonce = n; /* first SHA256 transform */ memcpy(tmp_hash1, sha256_init_state, 32); via_sha256(tmp_hash1, data, 80); /* or maybe 128? 
*/ swap32yes(tmp_hash1, tmp_hash1, 32/4); /* second SHA256 transform */ memcpy(tmp_hash, sha256_init_state, 32); via_sha256(tmp_hash, tmp_hash1, 32); stat_ctr++; if (unlikely((hash32[7] == 0) && fulltest(tmp_hash, target))) { /* swap nonce'd data back into original storage area; */ *nonce_inout = bswap_32(n); *last_nonce = n; return true; } if ((n >= max_nonce) || thr->work_restart) { *last_nonce = n; return false; } n++; } } #endif /* WANT_VIA_PADLOCK */ bfgminer-bfgminer-3.10.0/spidevc.c000066400000000000000000000162471226556647300170060ustar00rootroot00000000000000/* * Copyright 2013 bitfury * Copyright 2013 Luke Dashjr * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config.h" #ifdef HAVE_LINUX_SPI_SPIDEV_H #define HAVE_LINUX_SPI #endif #include "spidevc.h" #include #include #include #include #include #include #include #ifdef HAVE_LINUX_SPI #include #include #include #include #include #include #include #include #include #include #endif #include "logging.h" #ifdef HAVE_LINUX_SPI bool sys_spi_txrx(struct spi_port *port); static volatile unsigned *gpio; #endif struct spi_port *sys_spi; void spi_init(void) { #ifdef HAVE_LINUX_SPI int fd; fd = open("/dev/mem",O_RDWR|O_SYNC); if (fd < 0) { perror("/dev/mem trouble"); return; } gpio = mmap(0,4096,PROT_READ|PROT_WRITE,MAP_SHARED,fd,0x20200000); if (gpio == MAP_FAILED) { perror("gpio mmap trouble"); return; } close(fd); sys_spi = malloc(sizeof(*sys_spi)); *sys_spi = (struct spi_port){ .txrx = sys_spi_txrx, }; #endif } #ifdef HAVE_LINUX_SPI #define INP_GPIO(g) *(gpio+((g)/10)) &= ~(7<<(((g)%10)*3)) #define OUT_GPIO(g) *(gpio+((g)/10)) |= (1<<(((g)%10)*3)) #define SET_GPIO_ALT(g,a) *(gpio+(((g)/10))) |= (((a)<=3?(a)+4:(a)==4?3:2)<<(((g)%10)*3)) #define GPIO_SET *(gpio+7) // sets bits which are 1 ignores bits which are 0 #define GPIO_CLR *(gpio+10) // clears bits which are 1 ignores bits which are 0 // Bit-banging reset, to reset more chips in chain - toggle for longer period... Each 3 reset cycles reset first chip in chain static int spi_reset(int a) { int i,j; int len = 8; INP_GPIO(10); OUT_GPIO(10); INP_GPIO(11); OUT_GPIO(11); GPIO_SET = 1 << 11; // Set SCK for (i = 0; i < 32; i++) { // On standard settings this unoptimized code produces 1 Mhz freq. 
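// The "a *= a" loops below are intentional busy-wait delays: each pass of the
// outer loop drives GPIO 10 (SPI MOSI) high and then low while GPIO 11 (SCLK)
// stays high, producing the slow bit-banged reset clocking described in the
// comment above spi_reset().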
GPIO_SET = 1 << 10; for (j = 0; j < len; j++) { a *= a; } GPIO_CLR = 1 << 10; for (j = 0; j < len; j++) { a *= a; } } GPIO_CLR = 1 << 10; GPIO_CLR = 1 << 11; INP_GPIO(10); SET_GPIO_ALT(10,0); INP_GPIO(11); SET_GPIO_ALT(11,0); INP_GPIO(9); SET_GPIO_ALT(9,0); return a; } #define BAILOUT(s) do{ \ perror(s); \ close(fd); \ return false; \ }while(0) bool sys_spi_txrx(struct spi_port *port) { const void *wrbuf = spi_gettxbuf(port); void *rdbuf = spi_getrxbuf(port); size_t bufsz = spi_getbufsz(port); int fd; int mode, bits, speed, rv, i, j; struct spi_ioc_transfer tr[16]; memset(&tr,0,sizeof(tr)); mode = 0; bits = 8; speed = 4000000; if (port->speed) speed = port->speed; spi_reset(1234); fd = open("/dev/spidev0.0", O_RDWR); if (fd < 0) { perror("Unable to open SPI device"); return false; } if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0) BAILOUT("Unable to set WR MODE"); if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) BAILOUT("Unable to set RD MODE"); if (ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits) < 0) BAILOUT("Unable to set WR_BITS_PER_WORD"); if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) BAILOUT("Unable to set RD_BITS_PER_WORD"); if (ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed) < 0) BAILOUT("Unable to set WR_MAX_SPEED_HZ"); if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) BAILOUT("Unable to set RD_MAX_SPEED_HZ"); rv = 0; while (bufsz >= 4096) { tr[rv].tx_buf = (uintptr_t) wrbuf; tr[rv].rx_buf = (uintptr_t) rdbuf; tr[rv].len = 4096; tr[rv].delay_usecs = 1; tr[rv].speed_hz = speed; tr[rv].bits_per_word = bits; bufsz -= 4096; wrbuf += 4096; rdbuf += 4096; rv ++; } if (bufsz > 0) { tr[rv].tx_buf = (uintptr_t) wrbuf; tr[rv].rx_buf = (uintptr_t) rdbuf; tr[rv].len = (unsigned)bufsz; tr[rv].delay_usecs = 1; tr[rv].speed_hz = speed; tr[rv].bits_per_word = bits; rv ++; } i = rv; for (j = 0; j < i; j++) { rv = (int)ioctl(fd, SPI_IOC_MESSAGE(1), (intptr_t)&tr[j]); if (rv < 0) BAILOUT("WTF!"); } close(fd); spi_reset(4321); return true; } #endif static void *spi_emit_buf_reverse(struct spi_port *port, const void *p, size_t sz) { const unsigned char *str = p; void * const rv = &port->spibuf_rx[port->spibufsz]; if (port->spibufsz + sz >= SPIMAXSZ) return NULL; for (size_t i = 0; i < sz; ++i) { // Reverse bit order in each byte! unsigned char p = str[i]; p = ((p & 0xaa)>>1) | ((p & 0x55) << 1); p = ((p & 0xcc)>>2) | ((p & 0x33) << 2); p = ((p & 0xf0)>>4) | ((p & 0x0f) << 4); port->spibuf[port->spibufsz++] = p; } return rv; } void spi_emit_buf(struct spi_port * const port, const void * const str, const size_t sz) { if (port->spibufsz + sz >= SPIMAXSZ) return; memcpy(&port->spibuf[port->spibufsz], str, sz); port->spibufsz += sz; } /* TODO: in production, emit just bit-sequences! Eliminate padding to byte! */ void spi_emit_break(struct spi_port *port) { spi_emit_buf(port, "\x4", 1); } void spi_emit_fsync(struct spi_port *port) { spi_emit_buf(port, "\x6", 1); } void spi_emit_fasync(struct spi_port *port, int n) { int i; for (i = 0; i < n; i++) { spi_emit_buf(port, "\x5", 1); } } void spi_emit_nop(struct spi_port *port, int n) { int i; for (i = 0; i < n; ++i) { spi_emit_buf(port, "\x0", 1); } } void *spi_emit_data(struct spi_port *port, uint16_t addr, const void *buf, size_t len) { unsigned char otmp[3]; if (len < 4 || len > 128) return NULL; /* This cannot be programmed in single frame! 
*/ len /= 4; /* Strip */ otmp[0] = (len - 1) | 0xE0; otmp[1] = (addr >> 8)&0xFF; otmp[2] = addr & 0xFF; spi_emit_buf(port, otmp, 3); return spi_emit_buf_reverse(port, buf, len*4); } #ifdef USE_BFSB void spi_bfsb_select_bank(int bank) { static int last_bank = -2; if (bank == last_bank) return; const int banks[4]={18,23,24,25}; // GPIO connected to OE of level shifters int i; for(i=0;i<4;i++) { INP_GPIO(banks[i]); OUT_GPIO(banks[i]); if(i==bank) { GPIO_SET = 1 << banks[i]; // enable bank } else { GPIO_CLR = 1 << banks[i];// disable bank } } last_bank = bank; } #endif bfgminer-bfgminer-3.10.0/spidevc.h000066400000000000000000000041551226556647300170060ustar00rootroot00000000000000#ifndef SPIDEVC_H #define SPIDEVC_H #include #include #include #define SPIMAXSZ (256*1024) /* Initialize SPI using this function */ void spi_init(void); /* Do not allocate spi_port on the stack! OS X, at least, has a 512 KB default stack size for secondary threads This includes struct assignments which get allocated on the stack before being assigned to */ struct spi_port { /* TX-RX single frame */ bool (*txrx)(struct spi_port *port); char spibuf[SPIMAXSZ], spibuf_rx[SPIMAXSZ]; size_t spibufsz; void *userp; struct cgpu_info *cgpu; const char *repr; int logprio; int fd; uint32_t speed; uint16_t delay; uint8_t mode; uint8_t bits; }; extern struct spi_port *sys_spi; /* SPI BUFFER OPS */ static inline void spi_clear_buf(struct spi_port *port) { port->spibufsz = 0; } static inline void *spi_getrxbuf(struct spi_port *port) { return port->spibuf_rx; } static inline void *spi_gettxbuf(struct spi_port *port) { return port->spibuf; } static inline size_t spi_getbufsz(struct spi_port *port) { return port->spibufsz; } extern void spi_emit_buf(struct spi_port *, const void *, size_t); extern void spi_emit_break(struct spi_port *port); /* BREAK CONNECTIONS AFTER RESET */ extern void spi_emit_fsync(struct spi_port *port); /* FEED-THROUGH TO NEXT CHIP SYNCHRONOUSLY (WITH FLIP-FLOP) */ extern void spi_emit_fasync(struct spi_port *port, int n); /* FEED-THROUGH TO NEXT CHIP ASYNCHRONOUSLY (WITHOUT FLIP-FLOP INTERMEDIATE) */ extern void spi_emit_nop(struct spi_port *port, int n); /* TRANSMIT PROGRAMMING SEQUENCE (AND ALSO READ-BACK) */ /* addr is the destination address in bits (16-bit - 0 to 0xFFFF valid ones) buf is buffer to be transmitted, it will go at position spi_getbufsz()+3 len is length in _bytes_, should be 4 to 128 and be multiple of 4, as smallest transmission quantum is 32 bits */ extern void *spi_emit_data(struct spi_port *port, uint16_t addr, const void *buf, size_t len); static inline bool spi_txrx(struct spi_port *port) { return port->txrx(port); } extern bool sys_spi_txrx(struct spi_port *); void spi_bfsb_select_bank(int bank); #endif bfgminer-bfgminer-3.10.0/tm_i2c.c000066400000000000000000000072261226556647300165230ustar00rootroot00000000000000/* * Copyright 2013 gluk * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "config.h" #include #include #include #ifdef NEED_LINUX_I2C_H #include #endif #include #include "logging.h" #include "tm_i2c.h" static int tm_i2c_fd; float tm_i2c_Data2Temp(unsigned int ans) { float t = ans; return (t / 1023.0 * 3.3 * 2-2.73) * 100.0; } float tm_i2c_Data2Core(unsigned int ans) { float t = ans; return t / 1023.0 * 3.3; } int tm_i2c_init() { if ((tm_i2c_fd = open("/dev/i2c-1", O_RDWR)) < 0) return 1; else return 0; } void tm_i2c_close() { close(tm_i2c_fd); } unsigned int tm_i2c_req(int fd, unsigned char addr, unsigned char cmd, unsigned int data) { int i; unsigned char buf[16]; struct i2c_msg msg; tm_struct *tm = (tm_struct *) buf; struct i2c_rdwr_ioctl_data msg_rdwr; unsigned int ret; //applog(LOG_DEBUG, "REQ from %02X cmd: %02X", addr, cmd); tm->cmd = cmd; tm->data_lsb = data & 0xFF; tm->data_msb = (data & 0xFF00) >> 8; /* Write CMD */ msg.addr = addr; msg.flags = 0; msg.len = 3; msg.buf = (void*)tm; msg_rdwr.msgs = &msg; msg_rdwr.nmsgs = 1; if ((i = ioctl(fd, I2C_RDWR, &msg_rdwr)) < 0) { // perror("ioctl error"); return -1; } /* Read result */ msg.addr = addr; msg.flags = I2C_M_RD; msg.len = 3; msg.buf = (void*)tm; msg_rdwr.msgs = &msg; msg_rdwr.nmsgs = 1; if ((i = ioctl(fd, I2C_RDWR, &msg_rdwr)) < 0) { // perror("ioctl error"); return -1; } //hexdump(buf, 10); ret = (tm->data_msb << 8) + tm->data_lsb; if (tm->cmd == cmd) return ret; return 0; } int tm_i2c_detect(unsigned char slot) { if (slot < 0 || slot > 31) return 0; return tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_GET_CORE0, 0); } float tm_i2c_getcore0(unsigned char slot) { if (slot < 0 || slot > 31) return 0; return tm_i2c_Data2Core(tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_GET_CORE0, 0)); } float tm_i2c_getcore1(unsigned char slot) { if (slot < 0 || slot > 31) return 0; return tm_i2c_Data2Core(tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_GET_CORE1, 0)); } float tm_i2c_gettemp(unsigned char slot) { if (slot < 0 || slot > 31) return 0; return tm_i2c_Data2Temp(tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_GET_TEMP, 0)); } void tm_i2c_set_oe(unsigned char slot) { if (slot < 0 || slot > 31) return; tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_SET_OE, 0); } void tm_i2c_clear_oe(unsigned char slot) { if (slot < 0 || slot > 31) return; tm_i2c_req(tm_i2c_fd, (TM_ADDR >> 1) + slot, TM_SET_OE, 1); } unsigned char tm_i2c_slot2addr(unsigned char slot) { if (slot < 0 || slot > 31) return 0; return ((TM_ADDR >> 1) + slot); } bfgminer-bfgminer-3.10.0/tm_i2c.h000066400000000000000000000021551226556647300165240ustar00rootroot00000000000000/* - Version 1.0 - */ #define TM_ADDR 0xC0 #define TM_GET_TEMP 0x10 #define TM_GET_CORE0 0x11 #define TM_GET_CORE1 0x12 #define TM_SET_OE 0x20 #define TM_SET_MODE 0x21 #define TM_SET_RED 0x22 #define TM_SET_GREEN 0x23 #define TM_GET_PORTB 0x30 #define TM_SET_PORTB 0x31 #define TM_GET_PINB 0x32 #define TM_GET_PORTD 0x33 #define TM_SET_PORTD 0x34 #define TM_GET_PIND 0x35 #define TM_GET_ADC 0x36 #define TM_MODE_AUTO 0 #define TM_MODE_MANUAL 1 typedef struct { unsigned char cmd; unsigned char data_lsb; 
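/* Wire format sketch (derived from tm_i2c_req() in tm_i2c.c): every request
 * and reply is a 3-byte I2C message laid out exactly like this struct -
 *   byte 0: cmd       e.g. TM_GET_TEMP (0x10)
 *   byte 1: data_lsb  low 8 bits of the 16-bit argument/result
 *   byte 2: data_msb  high 8 bits
 * tm_i2c_req() reassembles the reply as (data_msb << 8) + data_lsb, and
 * helpers such as tm_i2c_gettemp() convert it with tm_i2c_Data2Temp(). */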
unsigned char data_msb; } tm_struct; int tm_i2c_init(); void tm_i2c_close(); unsigned int tm_i2c_req(int fd, unsigned char addr, unsigned char cmd, unsigned int data); float tm_i2c_Data2Temp(unsigned int ans); float tm_i2c_Data2Core(unsigned int ans); float tm_i2c_gettemp(unsigned char slot); float tm_i2c_getcore0(unsigned char slot); float tm_i2c_getcore1(unsigned char slot); void tm_i2c_set_oe(unsigned char slot); void tm_i2c_clear_oe(unsigned char slot); int tm_i2c_detect(unsigned char slot); unsigned char tm_i2c_slot2addr(unsigned char slot); bfgminer-bfgminer-3.10.0/todo_ztex.txt000066400000000000000000000002711226556647300177530ustar00rootroot00000000000000- verify setting cgpu.status=DEAD does in fact stop the thread - allow configuring bitstream directory - HS fpga config - allow configuring LIBZTEX_OVERHEATTHRESHOLD - hotplug support? bfgminer-bfgminer-3.10.0/usbtest.py000077500000000000000000000065521226556647300172510ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2012 Xiangfu # Copyright 2012-2013 Andrew Smith # Copyright 2013 Luke Dashjr # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free Software # Foundation; either version 3 of the License, or (at your option) any later # version. See COPYING for more details. # Linux usage: ./usbtest.py /dev/ttyUSB0 0xhexcodes|string|icarus # OR python usbtest.py /dev/ttyUSB0 0xhexcodes|string|icarus # # Windows usage: ./usbtest.py COM1 0xhexcodes|string|icarus # # sends the data sepcified to the USB device and waits # for a reply then displays it # # the data can be: # 0xhexcodes: e.g. 0x68656c6c6f20776f726c640a # would send "hello world\n" # # string: e.g. sendsometext # # icarus: sends 2 known block payloads for an icarus device # and shows the expected and actual answers if it's # a working V3 icarus import sys import serial import binascii if len(sys.argv) < 2: sys.stderr.write("Usage: " + sys.argv[0] + " device strings...\n") sys.stderr.write(" where device is either like /dev/ttyUSB0 or COM1\n") sys.stderr.write(" and strings are either '0xXXXX' or 'text'\n") sys.stderr.write(" if the first string is 'icarus' the rest are ignored\n") sys.stderr.write(" and 2 valid icarus test payloads are sent with results displayed\n") sys.stderr.write("\nAfter any command is sent it waits up to 30 seconds for a reply\n"); sys.exit("Aborting") # Open with a 10 second timeout - just to be sure ser = serial.Serial(sys.argv[1], 115200, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE, 10, False, False, 5) if sys.argv[2] == "icarus": # This show how Icarus use the block and midstate data # This will produce nonce 063c5e01 block = "0000000120c8222d0497a7ab44a1a2c7bf39de941c9970b1dc7cdc400000079700000000e88aabe1f353238c668d8a4df9318e614c10c474f8cdf8bc5f6397b946c33d7c4e7242c31a098ea500000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000" midstate = "33c5bf5751ec7f7e056443b5aee3800331432c83f404d9de38b94ecbf907b92d" rdata2 = binascii.a2b_hex(block.encode('ascii'))[95:63:-1] rmid = binascii.a2b_hex(midstate.encode('ascii'))[::-1] payload = rmid + rdata2 print("Push payload to icarus: " + binascii.hexlify(payload).decode('ascii')) ser.write(payload) b=ser.read(4) print("Result:(should be: 063c5e01): " + binascii.hexlify(b).decode('ascii')) # Just another test payload2 = 
"ce92099c5a80bb81c52990d5c0924c625fd25a535640607d5a4bdf8174e2c8d500000000000000000000000080000000000000000b290c1a42313b4f21b5bcb8" print("Push payload to icarus: " + payload2) ser.write(binascii.a2b_hex(payload2.encode('ascii'))) b=ser.read(4) print("Result:(should be: 8e0b31c5): " + binascii.hexlify(b).decode('ascii')) else: data = b"" for arg in sys.argv[2::]: if arg[0:2:] == '0x': data += binascii.a2b_hex(arg[2::].encode('ascii')) else: data += arg.encode('latin-1') print("Sending: 0x" + binascii.hexlify(data).decode('ascii')) ser.write(data) # If you're expecting more than one linefeed terminated reply, # you'll only see the first one # AND with no linefeed, this will wait the 10 seconds before returning print("Waiting up to 10 seconds ...") b=ser.readline() print("Result: hex 0x" + binascii.hexlify(b).decode('ascii')) print("Result: asc %s" % (repr(b),)) ser.close() bfgminer-bfgminer-3.10.0/util.c000066400000000000000000002144731226556647300163270ustar00rootroot00000000000000/* * Copyright 2011-2013 Con Kolivas * Copyright 2011-2013 Luke Dashjr * Copyright 2010 Jeff Garzik * Copyright 2012 Giel van Schijndel * Copyright 2012 Gavin Andresen * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_SYS_PRCTL_H # include #endif #if defined(__FreeBSD__) || defined(__OpenBSD__) # include #endif #ifndef WIN32 #include # ifdef __linux # include # endif # include # include # include # include #else # include # include # include # include # include #endif #include #ifdef NEED_BFG_LOWL_VCOM #include "lowl-vcom.h" #endif #include "miner.h" #include "compat.h" #include "util.h" #define DEFAULT_SOCKWAIT 60 bool successful_connect = false; struct timeval nettime; struct data_buffer { void *buf; size_t len; curl_socket_t *idlemarker; }; struct upload_buffer { const void *buf; size_t len; }; struct header_info { char *lp_path; int rolltime; char *reason; char *stratum_url; bool hadrolltime; bool canroll; bool hadexpire; }; struct tq_ent { void *data; struct tq_ent *prev; struct tq_ent *next; }; static void databuf_free(struct data_buffer *db) { if (!db) return; free(db->buf); #ifdef DEBUG_DATABUF applog(LOG_DEBUG, "databuf_free(%p)", db->buf); #endif memset(db, 0, sizeof(*db)); } // aka data_buffer_write static size_t all_data_cb(const void *ptr, size_t size, size_t nmemb, void *user_data) { struct data_buffer *db = user_data; size_t oldlen, newlen; oldlen = db->len; if (unlikely(nmemb == 0 || size == 0 || oldlen >= SIZE_MAX - size)) return 0; if (unlikely(nmemb > (SIZE_MAX - oldlen) / size)) nmemb = (SIZE_MAX - oldlen) / size; size_t len = size * nmemb; void *newmem; static const unsigned char zero = 0; if (db->idlemarker) { const unsigned char *cptr = ptr; for (size_t i = 0; i < len; ++i) if (!(isCspace(cptr[i]) || cptr[i] == '{')) { *db->idlemarker = CURL_SOCKET_BAD; db->idlemarker = NULL; break; } } newlen = oldlen + len; newmem = realloc(db->buf, newlen + 1); #ifdef DEBUG_DATABUF applog(LOG_DEBUG, "data_buffer_write realloc(%p, %lu) => %p", db->buf, (long unsigned)(newlen + 1), newmem); #endif if (!newmem) return 0; db->buf = newmem; db->len = newlen; memcpy(db->buf + oldlen, ptr, len); memcpy(db->buf + newlen, &zero, 
1); /* null terminate */ return nmemb; } static size_t upload_data_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct upload_buffer *ub = user_data; unsigned int len = size * nmemb; if (len > ub->len) len = ub->len; if (len) { memcpy(ptr, ub->buf, len); ub->buf += len; ub->len -= len; } return len; } static size_t resp_hdr_cb(void *ptr, size_t size, size_t nmemb, void *user_data) { struct header_info *hi = user_data; size_t remlen, slen, ptrlen = size * nmemb; char *rem, *val = NULL, *key = NULL; void *tmp; val = calloc(1, ptrlen); key = calloc(1, ptrlen); if (!key || !val) goto out; tmp = memchr(ptr, ':', ptrlen); if (!tmp || (tmp == ptr)) /* skip empty keys / blanks */ goto out; slen = tmp - ptr; if ((slen + 1) == ptrlen) /* skip key w/ no value */ goto out; memcpy(key, ptr, slen); /* store & nul term key */ key[slen] = 0; rem = ptr + slen + 1; /* trim value's leading whitespace */ remlen = ptrlen - slen - 1; while ((remlen > 0) && (isCspace(*rem))) { remlen--; rem++; } memcpy(val, rem, remlen); /* store value, trim trailing ws */ val[remlen] = 0; while ((*val) && (isCspace(val[strlen(val) - 1]))) val[strlen(val) - 1] = 0; if (!*val) /* skip blank value */ goto out; if (opt_protocol) applog(LOG_DEBUG, "HTTP hdr(%s): %s", key, val); if (!strcasecmp("X-Roll-Ntime", key)) { hi->hadrolltime = true; if (!strncasecmp("N", val, 1)) applog(LOG_DEBUG, "X-Roll-Ntime: N found"); else { hi->canroll = true; /* Check to see if expire= is supported and if not, set * the rolltime to the default scantime */ if (strlen(val) > 7 && !strncasecmp("expire=", val, 7)) { sscanf(val + 7, "%d", &hi->rolltime); hi->hadexpire = true; } else hi->rolltime = opt_scantime; applog(LOG_DEBUG, "X-Roll-Ntime expiry set to %d", hi->rolltime); } } if (!strcasecmp("X-Long-Polling", key)) { hi->lp_path = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Reject-Reason", key)) { hi->reason = val; /* steal memory reference */ val = NULL; } if (!strcasecmp("X-Stratum", key)) { hi->stratum_url = val; val = NULL; } out: free(key); free(val); return ptrlen; } static int keep_sockalive(SOCKETTYPE fd) { const int tcp_one = 1; const int tcp_keepidle = 45; const int tcp_keepintvl = 30; int ret = 0; if (unlikely(setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const char *)&tcp_one, sizeof(tcp_one)))) ret = 1; #ifndef WIN32 int flags = fcntl(fd, F_GETFL, 0); fcntl(fd, F_SETFL, O_NONBLOCK | flags); #else u_long flags = 1; ioctlsocket(fd, FIONBIO, &flags); #endif if (!opt_delaynet) #ifndef __linux if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)))) #else /* __linux */ if (unlikely(setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)))) #endif /* __linux */ ret = 1; #ifdef __linux if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)))) ret = 1; if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)))) ret = 1; if (unlikely(setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)))) ret = 1; #endif /* __linux */ #ifdef __APPLE_CC__ if (unlikely(setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &tcp_keepintvl, sizeof(tcp_keepintvl)))) ret = 1; #endif /* __APPLE_CC__ */ #ifdef WIN32 const int zero = 0; struct tcp_keepalive vals; vals.onoff = 1; vals.keepalivetime = tcp_keepidle * 1000; vals.keepaliveinterval = tcp_keepintvl * 1000; DWORD outputBytes; if (unlikely(WSAIoctl(fd, SIO_KEEPALIVE_VALS, &vals, sizeof(vals), NULL, 0, &outputBytes, NULL, NULL))) ret = 1; /* Windows happily 
submits indefinitely to the send buffer blissfully * unaware nothing is getting there without gracefully failing unless * we disable the send buffer */ if (unlikely(setsockopt(fd, SOL_SOCKET, SO_SNDBUF, (const char *)&zero, sizeof(zero)))) ret = 1; #endif /* WIN32 */ return ret; } int json_rpc_call_sockopt_cb(void __maybe_unused *userdata, curl_socket_t fd, curlsocktype __maybe_unused purpose) { return keep_sockalive(fd); } static void last_nettime(struct timeval *last) { rd_lock(&netacc_lock); last->tv_sec = nettime.tv_sec; last->tv_usec = nettime.tv_usec; rd_unlock(&netacc_lock); } static void set_nettime(void) { wr_lock(&netacc_lock); cgtime(&nettime); wr_unlock(&netacc_lock); } static int curl_debug_cb(__maybe_unused CURL *handle, curl_infotype type, char *data, size_t size, void *userdata) { struct pool *pool = (struct pool *)userdata; switch(type) { case CURLINFO_HEADER_IN: case CURLINFO_DATA_IN: case CURLINFO_SSL_DATA_IN: pool->cgminer_pool_stats.bytes_received += size; total_bytes_rcvd += size; pool->cgminer_pool_stats.net_bytes_received += size; break; case CURLINFO_HEADER_OUT: case CURLINFO_DATA_OUT: case CURLINFO_SSL_DATA_OUT: pool->cgminer_pool_stats.bytes_sent += size; total_bytes_sent += size; pool->cgminer_pool_stats.net_bytes_sent += size; break; case CURLINFO_TEXT: { if (!opt_protocol) break; // data is not null-terminated, so we need to copy and terminate it for applog char datacp[size + 1]; memcpy(datacp, data, size); while (likely(size) && unlikely(isCspace(datacp[size-1]))) --size; if (unlikely(!size)) break; datacp[size] = '\0'; applog(LOG_DEBUG, "Pool %u: %s", pool->pool_no, datacp); break; } default: break; } return 0; } struct json_rpc_call_state { struct data_buffer all_data; struct header_info hi; void *priv; char curl_err_str[CURL_ERROR_SIZE]; struct curl_slist *headers; struct upload_buffer upload_data; struct pool *pool; }; void json_rpc_call_async(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool longpoll, struct pool *pool, bool share, void *priv) { struct json_rpc_call_state *state = malloc(sizeof(struct json_rpc_call_state)); *state = (struct json_rpc_call_state){ .priv = priv, .pool = pool, }; long timeout = longpoll ? 
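/* Note: the ternary continuing on the next line gives long-poll requests a
 * one-hour timeout (60 * 60 s) versus 60 s for ordinary calls.  A minimal
 * synchronous caller looks like json_rpc_call() later in this file:
 *   json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL);
 *   rc = curl_easy_perform(curl);
 *   val = json_rpc_call_completed(curl, rc, probe, rolltime, NULL);
 * asynchronous users are expected to drive the easy handle themselves (e.g.
 * via curl_multi) before calling json_rpc_call_completed(). */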
(60 * 60) : 60; char len_hdr[64], user_agent_hdr[128]; struct curl_slist *headers = NULL; if (longpoll) state->all_data.idlemarker = &pool->lp_socket; /* it is assumed that 'curl' is freshly [re]initialized at this pt */ curl_easy_setopt(curl, CURLOPT_PRIVATE, state); curl_easy_setopt(curl, CURLOPT_TIMEOUT, timeout); /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed * to enable it */ curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, url); curl_easy_setopt(curl, CURLOPT_ENCODING, ""); curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1); /* Shares are staggered already and delays in submission can be costly * so do not delay them */ if (!opt_delaynet || share) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, all_data_cb); curl_easy_setopt(curl, CURLOPT_WRITEDATA, &state->all_data); curl_easy_setopt(curl, CURLOPT_READFUNCTION, upload_data_cb); curl_easy_setopt(curl, CURLOPT_READDATA, &state->upload_data); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, &state->curl_err_str[0]); curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1); curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, resp_hdr_cb); curl_easy_setopt(curl, CURLOPT_HEADERDATA, &state->hi); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5); } if (userpass) { curl_easy_setopt(curl, CURLOPT_USERPWD, userpass); curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); } if (longpoll) curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, json_rpc_call_sockopt_cb); curl_easy_setopt(curl, CURLOPT_POST, 1); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol request:\n%s", rpc_req); state->upload_data.buf = rpc_req; state->upload_data.len = strlen(rpc_req); sprintf(len_hdr, "Content-Length: %lu", (unsigned long) state->upload_data.len); sprintf(user_agent_hdr, "User-Agent: %s", PACKAGE"/"VERSION); headers = curl_slist_append(headers, "Content-type: application/json"); headers = curl_slist_append(headers, "X-Mining-Extensions: longpoll midstate rollntime submitold"); if (longpoll) headers = curl_slist_append(headers, "X-Minimum-Wait: 0"); if (likely(global_hashrate)) { char ghashrate[255]; sprintf(ghashrate, "X-Mining-Hashrate: %"PRIu64, (uint64_t)global_hashrate); headers = curl_slist_append(headers, ghashrate); } headers = curl_slist_append(headers, len_hdr); headers = curl_slist_append(headers, user_agent_hdr); headers = curl_slist_append(headers, "Expect:"); /* disable Expect hdr*/ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); state->headers = headers; if (opt_delaynet) { /* Don't delay share submission, but still track the nettime */ if (!share) { long long now_msecs, last_msecs; struct timeval now, last; cgtime(&now); last_nettime(&last); now_msecs = (long long)now.tv_sec * 1000; now_msecs += now.tv_usec / 1000; last_msecs = (long long)last.tv_sec * 1000; last_msecs += last.tv_usec / 1000; if (now_msecs > last_msecs && now_msecs - last_msecs < 250) { struct timespec rgtp; rgtp.tv_sec = 0; rgtp.tv_nsec = (250 - (now_msecs - last_msecs)) * 1000000; nanosleep(&rgtp, NULL); } } set_nettime(); } } json_t *json_rpc_call_completed(CURL *curl, int rc, bool probe, int 
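/* Note: second half of the async pair.  It fetches the json_rpc_call_state
 * stashed with CURLOPT_PRIVATE by json_rpc_call_async(), interprets the
 * X-Long-Polling / X-Stratum / X-Roll-Ntime headers gathered by
 * resp_hdr_cb(), reports the effective roll-ntime window through *rolltime,
 * and returns the decoded JSON value or NULL on failure, freeing the state
 * either way. */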
*rolltime, void *out_priv) { struct json_rpc_call_state *state; if (curl_easy_getinfo(curl, CURLINFO_PRIVATE, (void*)&state) != CURLE_OK) { applog(LOG_ERR, "Failed to get private curl data"); if (out_priv) *(void**)out_priv = NULL; goto err_out; } if (out_priv) *(void**)out_priv = state->priv; json_t *val, *err_val, *res_val; json_error_t err; struct pool *pool = state->pool; bool probing = probe && !pool->probed; if (rc) { applog(LOG_INFO, "HTTP request failed: %s", state->curl_err_str); goto err_out; } if (!state->all_data.buf) { applog(LOG_DEBUG, "Empty data received in json_rpc_call."); goto err_out; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.times_received++; if (probing) { pool->probed = true; /* If X-Long-Polling was found, activate long polling */ if (state->hi.lp_path) { if (pool->hdr_path != NULL) free(pool->hdr_path); pool->hdr_path = state->hi.lp_path; } else pool->hdr_path = NULL; if (state->hi.stratum_url) { pool->stratum_url = state->hi.stratum_url; state->hi.stratum_url = NULL; } } else { if (state->hi.lp_path) { free(state->hi.lp_path); state->hi.lp_path = NULL; } if (state->hi.stratum_url) { free(state->hi.stratum_url); state->hi.stratum_url = NULL; } } if (pool->force_rollntime) { state->hi.canroll = true; state->hi.hadexpire = true; state->hi.rolltime = pool->force_rollntime; } if (rolltime) *rolltime = state->hi.rolltime; pool->cgminer_pool_stats.rolltime = state->hi.rolltime; pool->cgminer_pool_stats.hadrolltime = state->hi.hadrolltime; pool->cgminer_pool_stats.canroll = state->hi.canroll; pool->cgminer_pool_stats.hadexpire = state->hi.hadexpire; val = JSON_LOADS(state->all_data.buf, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); if (opt_protocol) applog(LOG_DEBUG, "JSON protocol response:\n%s", (char*)state->all_data.buf); goto err_out; } if (opt_protocol) { char *s = json_dumps(val, JSON_INDENT(3)); applog(LOG_DEBUG, "JSON protocol response:\n%s", s); free(s); } /* JSON-RPC valid response returns a non-null 'result', * and a null 'error'. 
*/ res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val ||(err_val && !json_is_null(err_val))) { char *s; if (err_val) s = json_dumps(err_val, JSON_INDENT(3)); else s = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC call failed: %s", s); free(s); json_decref(val); goto err_out; } if (state->hi.reason) { json_object_set_new(val, "reject-reason", json_string(state->hi.reason)); free(state->hi.reason); state->hi.reason = NULL; } successful_connect = true; databuf_free(&state->all_data); curl_slist_free_all(state->headers); curl_easy_reset(curl); free(state); return val; err_out: databuf_free(&state->all_data); curl_slist_free_all(state->headers); curl_easy_reset(curl); if (!successful_connect) applog(LOG_DEBUG, "Failed to connect in json_rpc_call"); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); free(state); return NULL; } json_t *json_rpc_call(CURL *curl, const char *url, const char *userpass, const char *rpc_req, bool probe, bool longpoll, int *rolltime, struct pool *pool, bool share) { json_rpc_call_async(curl, url, userpass, rpc_req, longpoll, pool, share, NULL); int rc = curl_easy_perform(curl); return json_rpc_call_completed(curl, rc, probe, rolltime, NULL); } bool our_curl_supports_proxy_uris() { curl_version_info_data *data = curl_version_info(CURLVERSION_NOW); return data->age && data->version_num >= (( 7 <<16)|( 21 <<8)| 7); // 7.21.7 } // NOTE: This assumes reference URI is a root char *absolute_uri(char *uri, const char *ref) { if (strstr(uri, "://")) return strdup(uri); char *copy_start, *abs; bool need_slash = false; copy_start = (uri[0] == '/') ? &uri[1] : uri; if (ref[strlen(ref) - 1] != '/') need_slash = true; abs = malloc(strlen(ref) + strlen(copy_start) + 2); if (!abs) { applog(LOG_ERR, "Malloc failure in absolute_uri"); return NULL; } sprintf(abs, "%s%s%s", ref, need_slash ? 
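/* Worked example (hypothetical URLs): with ref = "http://pool.example:8332"
 * and uri = "/LP", the leading '/' is skipped, need_slash stays true because
 * ref does not end in '/', and this sprintf produces
 * "http://pool.example:8332/LP".  A uri already containing "://" is returned
 * unchanged by the strstr() shortcut at the top of the function. */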
"/" : "", copy_start); return abs; } static const char _hexchars[0x10] = "0123456789abcdef"; void bin2hex(char *out, const void *in, size_t len) { const unsigned char *p = in; while (len--) { (out++)[0] = _hexchars[p[0] >> 4]; (out++)[0] = _hexchars[p[0] & 0xf]; ++p; } out[0] = '\0'; } static inline int _hex2bin_char(const char c) { if (c >= '0' && c <= '9') return c - '0'; if (c >= 'a' && c <= 'f') return (c - 'a') + 10; if (c >= 'A' && c <= 'F') return (c - 'A') + 10; return -1; } /* Does the reverse of bin2hex but does not allocate any ram */ bool hex2bin(unsigned char *p, const char *hexstr, size_t len) { int n, o; while (len--) { n = _hex2bin_char((hexstr++)[0]); if (unlikely(n == -1)) { badchar: if (!hexstr[-1]) applog(LOG_ERR, "hex2bin: str truncated"); else applog(LOG_ERR, "hex2bin: invalid character 0x%02x", (int)hexstr[-1]); return false; } o = _hex2bin_char((hexstr++)[0]); if (unlikely(o == -1)) goto badchar; (p++)[0] = (n << 4) | o; } return likely(!hexstr[0]); } size_t ucs2_to_utf8(char * const out, const uint16_t * const in, const size_t sz) { uint8_t *p = (void*)out; for (int i = 0; i < sz; ++i) { const uint16_t c = in[i]; if (c < 0x80) p++[0] = c; else { if (c < 0x800) p++[0] = 0xc0 | (c >> 6); else { p++[0] = 0xe0 | (c >> 12); p++[0] = 0x80 | ((c >> 6) & 0x3f); } p++[0] = 0x80 | (c & 0x3f); } } return p - (uint8_t*)(void*)out; } char *ucs2_to_utf8_dup(uint16_t * const in, size_t sz) { char * const out = malloc((sz * 4) + 1); sz = ucs2_to_utf8(out, in, sz); out[sz] = '\0'; return out; } void hash_data(unsigned char *out_hash, const unsigned char *data) { unsigned char blkheader[80]; // data is past the first SHA256 step (padding and interpreting as big endian on a little endian platform), so we need to flip each 32-bit chunk around to get the original input block header swap32yes(blkheader, data, 80 / 4); // double-SHA256 to get the block hash gen_hash(blkheader, out_hash, 80); } // Example output: 0000000000000000000000000000000000000000000000000000ffff00000000 (bdiff 1) void real_block_target(unsigned char *target, const unsigned char *data) { uint8_t targetshift; if (unlikely(data[72] < 3 || data[72] > 0x20)) { // Invalid (out of bounds) target memset(target, 0xff, 32); return; } targetshift = data[72] - 3; memset(target, 0, targetshift); target[targetshift++] = data[75]; target[targetshift++] = data[74]; target[targetshift++] = data[73]; memset(&target[targetshift], 0, 0x20 - targetshift); } bool hash_target_check(const unsigned char *hash, const unsigned char *target) { const uint32_t *h32 = (uint32_t*)&hash[0]; const uint32_t *t32 = (uint32_t*)&target[0]; for (int i = 7; i >= 0; --i) { uint32_t h32i = le32toh(h32[i]); uint32_t t32i = le32toh(t32[i]); if (h32i > t32i) return false; if (h32i < t32i) return true; } return true; } bool hash_target_check_v(const unsigned char *hash, const unsigned char *target) { bool rc; rc = hash_target_check(hash, target); if (opt_debug) { unsigned char hash_swap[32], target_swap[32]; char hash_str[65]; char target_str[65]; for (int i = 0; i < 32; ++i) { hash_swap[i] = hash[31-i]; target_swap[i] = target[31-i]; } bin2hex(hash_str, hash_swap, 32); bin2hex(target_str, target_swap, 32); applog(LOG_DEBUG, " Proof: %s\nTarget: %s\nTrgVal? %s", hash_str, target_str, rc ? 
"YES (hash <= target)" : "no (false positive; hash > target)"); } return rc; } // This operates on a native-endian SHA256 state // In other words, on little endian platforms, every 4 bytes are in reverse order bool fulltest(const unsigned char *hash, const unsigned char *target) { unsigned char hash2[32]; swap32tobe(hash2, hash, 32 / 4); return hash_target_check_v(hash2, target); } struct thread_q *tq_new(void) { struct thread_q *tq; tq = calloc(1, sizeof(*tq)); if (!tq) return NULL; pthread_mutex_init(&tq->mutex, NULL); pthread_cond_init(&tq->cond, NULL); return tq; } void tq_free(struct thread_q *tq) { struct tq_ent *ent, *iter; if (!tq) return; DL_FOREACH_SAFE(tq->q, ent, iter) { DL_DELETE(tq->q, ent); free(ent); } pthread_cond_destroy(&tq->cond); pthread_mutex_destroy(&tq->mutex); memset(tq, 0, sizeof(*tq)); /* poison */ free(tq); } static void tq_freezethaw(struct thread_q *tq, bool frozen) { mutex_lock(&tq->mutex); tq->frozen = frozen; pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); } void tq_freeze(struct thread_q *tq) { tq_freezethaw(tq, true); } void tq_thaw(struct thread_q *tq) { tq_freezethaw(tq, false); } bool tq_push(struct thread_q *tq, void *data) { struct tq_ent *ent; bool rc = true; ent = calloc(1, sizeof(*ent)); if (!ent) return false; ent->data = data; mutex_lock(&tq->mutex); if (!tq->frozen) { DL_APPEND(tq->q, ent); } else { free(ent); rc = false; } pthread_cond_signal(&tq->cond); mutex_unlock(&tq->mutex); return rc; } void *tq_pop(struct thread_q *tq, const struct timespec *abstime) { struct tq_ent *ent; void *rval = NULL; int rc; mutex_lock(&tq->mutex); if (tq->q) goto pop; if (abstime) rc = pthread_cond_timedwait(&tq->cond, &tq->mutex, abstime); else rc = pthread_cond_wait(&tq->cond, &tq->mutex); if (rc) goto out; if (!tq->q) goto out; pop: ent = tq->q; rval = ent->data; DL_DELETE(tq->q, ent); free(ent); out: mutex_unlock(&tq->mutex); return rval; } int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg) { int rv = pthread_create(&thr->pth, attr, start, arg); if (likely(!rv)) thr->has_pth = true; return rv; } void thr_info_freeze(struct thr_info *thr) { struct tq_ent *ent, *iter; struct thread_q *tq; if (!thr) return; tq = thr->q; if (!tq) return; mutex_lock(&tq->mutex); tq->frozen = true; DL_FOREACH_SAFE(tq->q, ent, iter) { DL_DELETE(tq->q, ent); free(ent); } mutex_unlock(&tq->mutex); } void thr_info_cancel(struct thr_info *thr) { if (!thr) return; if (thr->has_pth) { pthread_cancel(thr->pth); thr->has_pth = false; } } #ifndef HAVE_PTHREAD_CANCEL // Bionic (Android) is intentionally missing pthread_cancel, so it is implemented using pthread_kill enum pthread_cancel_workaround_mode { PCWM_DEFAULT = 0, PCWM_TERMINATE = 1, PCWM_ASYNC = 2, PCWM_DISABLED = 4, PCWM_CANCELLED = 8, }; static pthread_key_t key_pcwm; struct sigaction pcwm_orig_term_handler; static void do_pthread_cancel_exit(int flags) { if (!(flags & PCWM_ASYNC)) // NOTE: Logging disables cancel while mutex held, so this is safe applog(LOG_WARNING, "pthread_cancel workaround: Cannot defer cancellation, terminating thread NOW"); pthread_exit(PTHREAD_CANCELED); } static void sighandler_pthread_cancel(int sig) { int flags = (int)pthread_getspecific(key_pcwm); if (flags & PCWM_TERMINATE) // Main thread { // Restore original handler and call it if (sigaction(sig, &pcwm_orig_term_handler, NULL)) quit(1, "pthread_cancel workaround: Failed to restore original handler"); raise(SIGTERM); quit(1, "pthread_cancel workaround: Original handler returned"); } if (flags & 
PCWM_CANCELLED) // Already pending cancel return; if (flags & PCWM_DISABLED) { flags |= PCWM_CANCELLED; if (pthread_setspecific(key_pcwm, (void*)flags)) quit(1, "pthread_cancel workaround: pthread_setspecific failed (setting PCWM_CANCELLED)"); return; } do_pthread_cancel_exit(flags); } void pthread_testcancel(void) { int flags = (int)pthread_getspecific(key_pcwm); if (flags & PCWM_CANCELLED && !(flags & PCWM_DISABLED)) do_pthread_cancel_exit(flags); } int pthread_setcancelstate(int state, int *oldstate) { int flags = (int)pthread_getspecific(key_pcwm); if (oldstate) *oldstate = (flags & PCWM_DISABLED) ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE; if (state == PTHREAD_CANCEL_DISABLE) flags |= PCWM_DISABLED; else { if (flags & PCWM_CANCELLED) do_pthread_cancel_exit(flags); flags &= ~PCWM_DISABLED; } if (pthread_setspecific(key_pcwm, (void*)flags)) return -1; return 0; } int pthread_setcanceltype(int type, int *oldtype) { int flags = (int)pthread_getspecific(key_pcwm); if (oldtype) *oldtype = (flags & PCWM_ASYNC) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED; if (type == PTHREAD_CANCEL_ASYNCHRONOUS) flags |= PCWM_ASYNC; else flags &= ~PCWM_ASYNC; if (pthread_setspecific(key_pcwm, (void*)flags)) return -1; return 0; } void setup_pthread_cancel_workaround() { if (pthread_key_create(&key_pcwm, NULL)) quit(1, "pthread_cancel workaround: pthread_key_create failed"); if (pthread_setspecific(key_pcwm, (void*)PCWM_TERMINATE)) quit(1, "pthread_cancel workaround: pthread_setspecific failed"); struct sigaction new_sigact = { .sa_handler = sighandler_pthread_cancel, }; if (sigaction(SIGTERM, &new_sigact, &pcwm_orig_term_handler)) quit(1, "pthread_cancel workaround: Failed to install SIGTERM handler"); } #endif static void _now_gettimeofday(struct timeval *); static void _cgsleep_us_r_nanosleep(cgtimer_t *, int64_t); #ifdef HAVE_POOR_GETTIMEOFDAY static struct timeval tv_timeofday_offset; static struct timeval _tv_timeofday_lastchecked; static pthread_mutex_t _tv_timeofday_mutex = PTHREAD_MUTEX_INITIALIZER; static void bfg_calibrate_timeofday(struct timeval *expected, char *buf) { struct timeval actual, delta; timeradd(expected, &tv_timeofday_offset, expected); _now_gettimeofday(&actual); if (expected->tv_sec >= actual.tv_sec - 1 && expected->tv_sec <= actual.tv_sec + 1) // Within reason - no change necessary return; timersub(&actual, expected, &delta); timeradd(&tv_timeofday_offset, &delta, &tv_timeofday_offset); sprintf(buf, "Recalibrating timeofday offset (delta %ld.%06lds)", (long)delta.tv_sec, (long)delta.tv_usec); *expected = actual; } void bfg_gettimeofday(struct timeval *out) { char buf[64] = ""; timer_set_now(out); mutex_lock(&_tv_timeofday_mutex); if (_tv_timeofday_lastchecked.tv_sec < out->tv_sec - 21) bfg_calibrate_timeofday(out, buf); else timeradd(out, &tv_timeofday_offset, out); mutex_unlock(&_tv_timeofday_mutex); if (unlikely(buf[0])) applog(LOG_WARNING, "%s", buf); } #endif #ifdef WIN32 static LARGE_INTEGER _perffreq; static void _now_queryperformancecounter(struct timeval *tv) { LARGE_INTEGER now; if (unlikely(!QueryPerformanceCounter(&now))) quit(1, "QueryPerformanceCounter failed"); *tv = (struct timeval){ .tv_sec = now.QuadPart / _perffreq.QuadPart, .tv_usec = (now.QuadPart % _perffreq.QuadPart) * 1000000 / _perffreq.QuadPart, }; } #endif static void bfg_init_time(); static void _now_is_not_set(__maybe_unused struct timeval *tv) { bfg_init_time(); timer_set_now(tv); } void (*timer_set_now)(struct timeval *tv) = _now_is_not_set; void (*cgsleep_us_r)(cgtimer_t *, int64_t) 
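/* Note: timer_set_now and cgsleep_us_r are function pointers so the timing
 * backend can be picked once at runtime.  The first timer_set_now() call goes
 * through _now_is_not_set(), which runs bfg_init_time() to probe, in order,
 * clock_gettime(CLOCK_MONOTONIC_RAW), clock_gettime(CLOCK_MONOTONIC),
 * QueryPerformanceCounter (Windows) and finally gettimeofday(); where
 * clock_nanosleep() exists the sleep routine is likewise switched to the
 * CLOCK_MONOTONIC absolute-deadline variant. */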
= _cgsleep_us_r_nanosleep; #ifdef HAVE_CLOCK_GETTIME_MONOTONIC static clockid_t bfg_timer_clk; static void _now_clock_gettime(struct timeval *tv) { struct timespec ts; if (unlikely(clock_gettime(bfg_timer_clk, &ts))) quit(1, "clock_gettime failed"); *tv = (struct timeval){ .tv_sec = ts.tv_sec, .tv_usec = ts.tv_nsec / 1000, }; } #ifdef HAVE_CLOCK_NANOSLEEP static void _cgsleep_us_r_monotonic(cgtimer_t *tv_start, int64_t us) { struct timeval tv_end[1]; struct timespec ts_end[1]; int ret; timer_set_delay(tv_end, tv_start, us); timeval_to_spec(ts_end, tv_end); do { ret = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); } while (ret == EINTR); } #endif static bool _bfg_try_clock_gettime(clockid_t clk) { struct timespec ts; if (clock_gettime(clk, &ts)) return false; bfg_timer_clk = clk; timer_set_now = _now_clock_gettime; return true; } #endif static void bfg_init_time() { if (timer_set_now != _now_is_not_set) return; #ifdef HAVE_CLOCK_GETTIME_MONOTONIC #ifdef HAVE_CLOCK_GETTIME_MONOTONIC_RAW if (_bfg_try_clock_gettime(CLOCK_MONOTONIC_RAW)) applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC_RAW)"); else #endif if (_bfg_try_clock_gettime(CLOCK_MONOTONIC)) { applog(LOG_DEBUG, "Timers: Using clock_gettime(CLOCK_MONOTONIC)"); #ifdef HAVE_CLOCK_NANOSLEEP cgsleep_us_r = _cgsleep_us_r_monotonic; #endif } else #endif #ifdef WIN32 if (QueryPerformanceFrequency(&_perffreq) && _perffreq.QuadPart) { timer_set_now = _now_queryperformancecounter; applog(LOG_DEBUG, "Timers: Using QueryPerformanceCounter"); } else #endif { timer_set_now = _now_gettimeofday; applog(LOG_DEBUG, "Timers: Using gettimeofday"); } #ifdef HAVE_POOR_GETTIMEOFDAY char buf[64] = ""; struct timeval tv; timer_set_now(&tv); bfg_calibrate_timeofday(&tv, buf); applog(LOG_DEBUG, "%s", buf); #endif } void subtime(struct timeval *a, struct timeval *b) { timersub(a, b, b); } void addtime(struct timeval *a, struct timeval *b) { timeradd(a, b, b); } bool time_more(struct timeval *a, struct timeval *b) { return timercmp(a, b, >); } bool time_less(struct timeval *a, struct timeval *b) { return timercmp(a, b, <); } void copy_time(struct timeval *dest, const struct timeval *src) { memcpy(dest, src, sizeof(struct timeval)); } void timespec_to_val(struct timeval *val, const struct timespec *spec) { val->tv_sec = spec->tv_sec; val->tv_usec = spec->tv_nsec / 1000; } void timeval_to_spec(struct timespec *spec, const struct timeval *val) { spec->tv_sec = val->tv_sec; spec->tv_nsec = val->tv_usec * 1000; } void us_to_timeval(struct timeval *val, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); val->tv_sec = tvdiv.quot; val->tv_usec = tvdiv.rem; } void us_to_timespec(struct timespec *spec, int64_t us) { lldiv_t tvdiv = lldiv(us, 1000000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000; } void ms_to_timespec(struct timespec *spec, int64_t ms) { lldiv_t tvdiv = lldiv(ms, 1000); spec->tv_sec = tvdiv.quot; spec->tv_nsec = tvdiv.rem * 1000000; } void timeraddspec(struct timespec *a, const struct timespec *b) { a->tv_sec += b->tv_sec; a->tv_nsec += b->tv_nsec; if (a->tv_nsec >= 1000000000) { a->tv_nsec -= 1000000000; a->tv_sec++; } } #ifndef WIN32 static void _now_gettimeofday(struct timeval *tv) { gettimeofday(tv, NULL); } #else /* Windows start time is since 1601 lol so convert it to unix epoch 1970. */ #define EPOCHFILETIME (116444736000000000LL) /* Return the system time as an lldiv_t in decimicroseconds. 
*/ static void decius_time(lldiv_t *lidiv) { FILETIME ft; LARGE_INTEGER li; GetSystemTimeAsFileTime(&ft); li.LowPart = ft.dwLowDateTime; li.HighPart = ft.dwHighDateTime; li.QuadPart -= EPOCHFILETIME; /* SystemTime is in decimicroseconds so divide by an unusual number */ *lidiv = lldiv(li.QuadPart, 10000000); } void _now_gettimeofday(struct timeval *tv) { lldiv_t lidiv; decius_time(&lidiv); tv->tv_sec = lidiv.quot; tv->tv_usec = lidiv.rem / 10; } #endif void cgsleep_ms_r(cgtimer_t *tv_start, int ms) { cgsleep_us_r(tv_start, ((int64_t)ms) * 1000); } static void _cgsleep_us_r_nanosleep(cgtimer_t *tv_start, int64_t us) { struct timeval tv_timer[1], tv[1]; struct timespec ts[1]; timer_set_delay(tv_timer, tv_start, us); while (true) { timer_set_now(tv); if (!timercmp(tv_timer, tv, >)) return; timersub(tv_timer, tv, tv); timeval_to_spec(ts, tv); nanosleep(ts, NULL); } } void cgsleep_ms(int ms) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_ms_r(&ts_start, ms); } void cgsleep_us(int64_t us) { cgtimer_t ts_start; cgsleep_prepare_r(&ts_start); cgsleep_us_r(&ts_start, us); } /* Returns the microseconds difference between end and start times as a double */ double us_tdiff(struct timeval *end, struct timeval *start) { return end->tv_sec * 1000000 + end->tv_usec - start->tv_sec * 1000000 - start->tv_usec; } /* Returns the seconds difference between end and start times as a double */ double tdiff(struct timeval *end, struct timeval *start) { return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0; } int32_t utf8_decode(const void *b, int *out_len) { int32_t w; const unsigned char *s = b; if (!(s[0] & 0x80)) { // ASCII *out_len = 1; return s[0]; } #ifdef STRICT_UTF8 if (unlikely(!(s[0] & 0x40))) goto invalid; #endif if (!(s[0] & 0x20)) *out_len = 2; else if (!(s[0] & 0x10)) *out_len = 3; else if (likely(!(s[0] & 8))) *out_len = 4; else goto invalid; w = s[0] & ((2 << (6 - *out_len)) - 1); for (int i = 1; i < *out_len; ++i) { #ifdef STRICT_UTF8 if (unlikely((s[i] & 0xc0) != 0x80)) goto invalid; #endif w = (w << 6) | (s[i] & 0x3f); } #if defined(STRICT_UTF8) if (unlikely(w > 0x10FFFF)) goto invalid; // FIXME: UTF-8 requires smallest possible encoding; check it #endif return w; invalid: *out_len = 1; return REPLACEMENT_CHAR; } static void _utf8_test(const char *s, const wchar_t expected, int expectedlen) { int len; wchar_t r; r = utf8_decode(s, &len); if (unlikely(r != expected || expectedlen != len)) applog(LOG_ERR, "UTF-8 test U+%06lX (len %d) failed: got U+%06lX (len %d)", (unsigned long)expected, expectedlen, (unsigned long)r, len); } #define _test_intrange(s, ...) 
_test_intrange(s, (int[]){ __VA_ARGS__ }) void utf8_test() { _utf8_test("", 0, 1); _utf8_test("\1", 1, 1); _utf8_test("\x7f", 0x7f, 1); #if WCHAR_MAX >= 0x80 _utf8_test("\xc2\x80", 0x80, 2); #if WCHAR_MAX >= 0xff _utf8_test("\xc3\xbf", 0xff, 2); #if WCHAR_MAX >= 0x7ff _utf8_test("\xdf\xbf", 0x7ff, 2); #if WCHAR_MAX >= 0x800 _utf8_test("\xe0\xa0\x80", 0x800, 3); #if WCHAR_MAX >= 0xffff _utf8_test("\xef\xbf\xbf", 0xffff, 3); #if WCHAR_MAX >= 0x10000 _utf8_test("\xf0\x90\x80\x80", 0x10000, 4); #if WCHAR_MAX >= 0x10ffff _utf8_test("\xf4\x8f\xbf\xbf", 0x10ffff, 4); #endif #endif #endif #endif #endif #endif #endif #ifdef STRICT_UTF8 _utf8_test("\x80", REPLACEMENT_CHAR, 1); _utf8_test("\xbf", REPLACEMENT_CHAR, 1); _utf8_test("\xfe", REPLACEMENT_CHAR, 1); _utf8_test("\xff", REPLACEMENT_CHAR, 1); #endif } bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) { char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; char url_address[256], port[6]; int url_len, port_len = 0; url_begin = strstr(url, "//"); if (!url_begin) url_begin = url; else url_begin += 2; /* Look for numeric ipv6 entries */ ipv6_begin = strstr(url_begin, "["); ipv6_end = strstr(url_begin, "]"); if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) url_end = strstr(ipv6_end, ":"); else url_end = strstr(url_begin, ":"); if (url_end) { url_len = url_end - url_begin; port_len = strlen(url_begin) - url_len - 1; if (port_len < 1) return false; port_start = url_end + 1; } else url_len = strlen(url_begin); if (url_len < 1) return false; sprintf(url_address, "%.*s", url_len, url_begin); if (port_len) { char *slash; snprintf(port, 6, "%.*s", port_len, port_start); slash = strchr(port, '/'); if (slash) *slash = '\0'; } else strcpy(port, "80"); free(*sockaddr_port); *sockaddr_port = strdup(port); free(*sockaddr_url); *sockaddr_url = strdup(url_address); return true; } enum send_ret { SEND_OK, SEND_SELECTFAIL, SEND_SENDFAIL, SEND_INACTIVE }; /* Send a single command across a socket, appending \n to it. 
This should all * be done under stratum lock except when first establishing the socket */ static enum send_ret __stratum_send(struct pool *pool, char *s, ssize_t len) { SOCKETTYPE sock = pool->sock; ssize_t ssent = 0; strcat(s, "\n"); len++; while (len > 0 ) { struct timeval timeout = {1, 0}; ssize_t sent; fd_set wd; FD_ZERO(&wd); FD_SET(sock, &wd); if (select(sock + 1, NULL, &wd, NULL, &timeout) < 1) return SEND_SELECTFAIL; #ifdef __APPLE__ sent = send(pool->sock, s + ssent, len, SO_NOSIGPIPE); #elif WIN32 sent = send(pool->sock, s + ssent, len, 0); #else sent = send(pool->sock, s + ssent, len, MSG_NOSIGNAL); #endif if (sent < 0) { if (!sock_blocks()) return SEND_SENDFAIL; sent = 0; } ssent += sent; len -= sent; } pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.bytes_sent += ssent; total_bytes_sent += ssent; pool->cgminer_pool_stats.net_bytes_sent += ssent; return SEND_OK; } bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force) { enum send_ret ret = SEND_INACTIVE; if (opt_protocol) applog(LOG_DEBUG, "Pool %u: SEND: %s", pool->pool_no, s); mutex_lock(&pool->stratum_lock); if (pool->stratum_active || force) ret = __stratum_send(pool, s, len); mutex_unlock(&pool->stratum_lock); /* This is to avoid doing applog under stratum_lock */ switch (ret) { default: case SEND_OK: break; case SEND_SELECTFAIL: applog(LOG_DEBUG, "Write select failed on pool %d sock", pool->pool_no); suspend_stratum(pool); break; case SEND_SENDFAIL: applog(LOG_DEBUG, "Failed to send in stratum_send"); suspend_stratum(pool); break; case SEND_INACTIVE: applog(LOG_DEBUG, "Stratum send failed due to no pool stratum_active"); break; } return (ret == SEND_OK); } static bool socket_full(struct pool *pool, int wait) { SOCKETTYPE sock = pool->sock; struct timeval timeout; fd_set rd; if (sock == INVSOCK) return true; if (unlikely(wait < 0)) wait = 0; FD_ZERO(&rd); FD_SET(sock, &rd); timeout.tv_usec = 0; timeout.tv_sec = wait; if (select(sock + 1, &rd, NULL, NULL, &timeout) > 0) return true; return false; } /* Check to see if Santa's been good to you */ bool sock_full(struct pool *pool) { if (strlen(pool->sockbuf)) return true; return (socket_full(pool, 0)); } static void clear_sockbuf(struct pool *pool) { strcpy(pool->sockbuf, ""); } static void clear_sock(struct pool *pool) { ssize_t n; mutex_lock(&pool->stratum_lock); do { if (pool->sock) n = recv(pool->sock, pool->sockbuf, RECVSIZE, 0); else n = 0; } while (n > 0); mutex_unlock(&pool->stratum_lock); clear_sockbuf(pool); } /* Make sure the pool sockbuf is large enough to cope with any coinbase size * by reallocing it to a large enough size rounded up to a multiple of RBUFSIZE * and zeroing the new memory */ static void recalloc_sock(struct pool *pool, size_t len) { size_t old, new; old = strlen(pool->sockbuf); new = old + len + 1; if (new < pool->sockbuf_size) return; new = new + (RBUFSIZE - (new % RBUFSIZE)); // Avoid potentially recursive locking // applog(LOG_DEBUG, "Recallocing pool sockbuf to %lu", (unsigned long)new); pool->sockbuf = realloc(pool->sockbuf, new); if (!pool->sockbuf) quithere(1, "Failed to realloc pool sockbuf"); memset(pool->sockbuf + old, 0, new - old); pool->sockbuf_size = new; } /* Peeks at a socket to find the first end of line and then reads just that * from the socket and returns that as a malloced char */ char *recv_line(struct pool *pool) { char *tok, *sret = NULL; ssize_t len, buflen; int waited = 0; if (!strstr(pool->sockbuf, "\n")) { struct timeval rstart, now; cgtime(&rstart); if (!socket_full(pool, 
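/* Note: recv_line() accumulates socket data in pool->sockbuf until a '\n'
 * appears, waiting at most DEFAULT_SOCKWAIT (60 s) overall, then returns the
 * first line as a malloc()ed string with the newline stripped.  Whatever
 * follows the '\n' stays in sockbuf for the next call; if no line could be
 * read, the socket buffer is discarded via clear_sock(). */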
DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for data on socket_full"); goto out; } do { char s[RBUFSIZE]; size_t slen; ssize_t n; memset(s, 0, RBUFSIZE); n = recv(pool->sock, s, RECVSIZE, 0); if (!n) { applog(LOG_DEBUG, "Socket closed waiting in recv_line"); suspend_stratum(pool); break; } cgtime(&now); waited = tdiff(&now, &rstart); if (n < 0) { //Save errno from being overweitten bei socket_ commands int socket_recv_errno; socket_recv_errno = SOCKERR; if (!sock_blocks() || !socket_full(pool, DEFAULT_SOCKWAIT - waited)) { applog(LOG_DEBUG, "Failed to recv sock in recv_line: %s", bfg_strerror(socket_recv_errno, BST_SOCKET)); suspend_stratum(pool); break; } } else { slen = strlen(s); recalloc_sock(pool, slen); strcat(pool->sockbuf, s); } } while (waited < DEFAULT_SOCKWAIT && !strstr(pool->sockbuf, "\n")); } buflen = strlen(pool->sockbuf); tok = strtok(pool->sockbuf, "\n"); if (!tok) { applog(LOG_DEBUG, "Failed to parse a \\n terminated string in recv_line"); goto out; } sret = strdup(tok); len = strlen(sret); /* Copy what's left in the buffer after the \n, including the * terminating \0 */ if (buflen > len + 1) memmove(pool->sockbuf, pool->sockbuf + len + 1, buflen - len + 1); else strcpy(pool->sockbuf, ""); pool->cgminer_pool_stats.times_received++; pool->cgminer_pool_stats.bytes_received += len; total_bytes_rcvd += len; pool->cgminer_pool_stats.net_bytes_received += len; out: if (!sret) clear_sock(pool); else if (opt_protocol) applog(LOG_DEBUG, "Pool %u: RECV: %s", pool->pool_no, sret); return sret; } /* Dumps any JSON value as a string. Just like jansson 2.1's JSON_ENCODE_ANY * flag, but this is compatible with 2.0. */ char *json_dumps_ANY(json_t *json, size_t flags) { switch (json_typeof(json)) { case JSON_ARRAY: case JSON_OBJECT: return json_dumps(json, flags); default: break; } char *rv; #ifdef JSON_ENCODE_ANY rv = json_dumps(json, JSON_ENCODE_ANY | flags); if (rv) return rv; #endif json_t *tmp = json_array(); char *s; int i; size_t len; if (!tmp) quithere(1, "Failed to allocate json array"); if (json_array_append(tmp, json)) quithere(1, "Failed to append temporary array"); s = json_dumps(tmp, flags); if (!s) return NULL; for (i = 0; s[i] != '['; ++i) if (unlikely(!(s[i] && isCspace(s[i])))) quithere(1, "Failed to find opening bracket in array dump"); len = strlen(&s[++i]) - 1; if (unlikely(s[i+len] != ']')) quithere(1, "Failed to find closing bracket in array dump"); rv = malloc(len + 1); memcpy(rv, &s[i], len); rv[len] = '\0'; free(s); json_decref(tmp); return rv; } /* Extracts a string value from a json array with error checking. To be used * when the value of the string returned is only examined and not to be stored. 
* See json_array_string below */ const char *__json_array_string(json_t *val, unsigned int entry) { json_t *arr_entry; if (json_is_null(val)) return NULL; if (!json_is_array(val)) return NULL; if (entry > json_array_size(val)) return NULL; arr_entry = json_array_get(val, entry); if (!json_is_string(arr_entry)) return NULL; return json_string_value(arr_entry); } /* Creates a freshly malloced dup of __json_array_string */ static char *json_array_string(json_t *val, unsigned int entry) { const char *buf = __json_array_string(val, entry); if (buf) return strdup(buf); return NULL; } void stratum_probe_transparency(struct pool *pool) { // Request transaction data to discourage pools from doing anything shady char s[1024]; int sLen; sLen = sprintf(s, "{\"params\": [\"%s\"], \"id\": \"txlist%s\", \"method\": \"mining.get_transactions\"}", pool->swork.job_id, pool->swork.job_id); stratum_send(pool, s, sLen); if ((!pool->swork.opaque) && !timer_isset(&pool->swork.tv_transparency)) cgtime(&pool->swork.tv_transparency); pool->swork.transparency_probed = true; } static bool parse_notify(struct pool *pool, json_t *val) { const char *prev_hash, *coinbase1, *coinbase2, *bbversion, *nbit, *ntime; char *job_id; bool clean, ret = false; int merkles, i; size_t cb1_len, cb2_len; json_t *arr; arr = json_array_get(val, 4); if (!arr || !json_is_array(arr)) goto out; merkles = json_array_size(arr); for (i = 0; i < merkles; i++) if (!json_is_string(json_array_get(arr, i))) goto out; prev_hash = __json_array_string(val, 1); coinbase1 = __json_array_string(val, 2); coinbase2 = __json_array_string(val, 3); bbversion = __json_array_string(val, 5); nbit = __json_array_string(val, 6); ntime = __json_array_string(val, 7); clean = json_is_true(json_array_get(val, 8)); if (!prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) goto out; job_id = json_array_string(val, 0); if (!job_id) goto out; cg_wlock(&pool->data_lock); cgtime(&pool->swork.tv_received); free(pool->swork.job_id); pool->swork.job_id = job_id; pool->submit_old = !clean; pool->swork.clean = true; hex2bin(&pool->swork.header1[0], bbversion, 4); hex2bin(&pool->swork.header1[4], prev_hash, 32); hex2bin((void*)&pool->swork.ntime, ntime, 4); pool->swork.ntime = be32toh(pool->swork.ntime); hex2bin(&pool->swork.diffbits[0], nbit, 4); cb1_len = strlen(coinbase1) / 2; pool->swork.nonce2_offset = cb1_len + pool->n1_len; cb2_len = strlen(coinbase2) / 2; bytes_resize(&pool->swork.coinbase, pool->swork.nonce2_offset + pool->n2size + cb2_len); uint8_t *coinbase = bytes_buf(&pool->swork.coinbase); hex2bin(coinbase, coinbase1, cb1_len); hex2bin(&coinbase[cb1_len], pool->nonce1, pool->n1_len); // NOTE: gap for nonce2, filled at work generation time hex2bin(&coinbase[pool->swork.nonce2_offset + pool->n2size], coinbase2, cb2_len); bytes_resize(&pool->swork.merkle_bin, 32 * merkles); for (i = 0; i < merkles; i++) hex2bin(&bytes_buf(&pool->swork.merkle_bin)[i * 32], json_string_value(json_array_get(arr, i)), 32); pool->swork.merkles = merkles; pool->nonce2 = 0; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Received stratum notify from pool %u with job_id=%s", pool->pool_no, job_id); if (opt_debug && opt_protocol) { applog(LOG_DEBUG, "job_id: %s", job_id); applog(LOG_DEBUG, "prev_hash: %s", prev_hash); applog(LOG_DEBUG, "coinbase1: %s", coinbase1); applog(LOG_DEBUG, "coinbase2: %s", coinbase2); for (i = 0; i < merkles; i++) applog(LOG_DEBUG, "merkle%d: %s", i, json_string_value(json_array_get(arr, i))); applog(LOG_DEBUG, "bbversion: %s", bbversion); 
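/* Reference: the mining.notify parameter layout consumed above is
 *   params[0] job_id     params[1] prevhash   params[2] coinbase1
 *   params[3] coinbase2  params[4] merkle branch (array of hex hashes)
 *   params[5] version    params[6] nbits      params[7] ntime
 *   params[8] clean_jobs (boolean)
 * matching the indices passed to __json_array_string()/json_array_get() at
 * the top of parse_notify(). */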
applog(LOG_DEBUG, "nbit: %s", nbit); applog(LOG_DEBUG, "ntime: %s", ntime); applog(LOG_DEBUG, "clean: %s", clean ? "yes" : "no"); } /* A notify message is the closest stratum gets to a getwork */ pool->getwork_requested++; total_getworks++; if ((merkles && (!pool->swork.transparency_probed || rand() <= RAND_MAX / (opt_skip_checks + 1))) || timer_isset(&pool->swork.tv_transparency)) if (pool->probed) stratum_probe_transparency(pool); ret = true; out: return ret; } static bool parse_diff(struct pool *pool, json_t *val) { double diff; diff = json_number_value(json_array_get(val, 0)); if (diff == 0) return false; cg_wlock(&pool->data_lock); pool->swork.diff = diff; cg_wunlock(&pool->data_lock); applog(LOG_DEBUG, "Pool %d stratum bdifficulty set to %f", pool->pool_no, diff); return true; } static bool parse_reconnect(struct pool *pool, json_t *val) { const char *url; char address[256]; json_t *port_json; url = __json_array_string(val, 0); if (!url) url = pool->sockaddr_url; port_json = json_array_get(val, 1); if (json_is_number(port_json)) { const unsigned port = json_number_value(port_json); snprintf(address, sizeof(address), "%s:%u", url, port); } else { const char *port; if (json_is_string(port_json)) port = json_string_value(port_json); else port = pool->stratum_port; snprintf(address, sizeof(address), "%s:%s", url, port); } if (!extract_sockaddr(address, &pool->sockaddr_url, &pool->stratum_port)) return false; pool->stratum_url = pool->sockaddr_url; applog(LOG_NOTICE, "Reconnect requested from pool %d to %s", pool->pool_no, address); if (!restart_stratum(pool)) return false; return true; } static bool send_version(struct pool *pool, json_t *val) { char s[RBUFSIZE], *idstr; json_t *id = json_object_get(val, "id"); if (!(id && !json_is_null(id))) return false; idstr = json_dumps_ANY(id, 0); sprintf(s, "{\"id\": %s, \"result\": \""PACKAGE"/"VERSION"\", \"error\": null}", idstr); free(idstr); if (!stratum_send(pool, s, strlen(s))) return false; return true; } static bool stratum_show_message(struct pool *pool, json_t *val, json_t *params) { char *msg; char s[RBUFSIZE], *idstr; json_t *id = json_object_get(val, "id"); msg = json_array_string(params, 0); if (likely(msg)) { free(pool->admin_msg); pool->admin_msg = msg; applog(LOG_NOTICE, "Message from pool %u: %s", pool->pool_no, msg); } if (!(id && !json_is_null(id))) return true; idstr = json_dumps_ANY(id, 0); if (likely(msg)) sprintf(s, "{\"id\": %s, \"result\": true, \"error\": null}", idstr); else sprintf(s, "{\"id\": %s, \"result\": null, \"error\": [-1, \"Failed to parse message\", null]}", idstr); free(idstr); if (!stratum_send(pool, s, strlen(s))) return false; return true; } bool parse_method(struct pool *pool, char *s) { json_t *val = NULL, *method, *err_val, *params; json_error_t err; bool ret = false; const char *buf; if (!s) goto out; val = JSON_LOADS(s, &err); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } method = json_object_get(val, "method"); if (!method) goto out; err_val = json_object_get(val, "error"); params = json_object_get(val, "params"); if (err_val && !json_is_null(err_val)) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC method decode failed: %s", ss); free(ss); goto out; } buf = json_string_value(method); if (!buf) goto out; if (!strncasecmp(buf, "mining.notify", 13)) { if (parse_notify(pool, params)) pool->stratum_notify = ret = true; else pool->stratum_notify = ret = false; goto out; } if 
(!strncasecmp(buf, "mining.set_difficulty", 21) && parse_diff(pool, params)) { ret = true; goto out; } if (!strncasecmp(buf, "client.reconnect", 16) && parse_reconnect(pool, params)) { ret = true; goto out; } if (!strncasecmp(buf, "client.get_version", 18) && send_version(pool, val)) { ret = true; goto out; } if (!strncasecmp(buf, "client.show_message", 19) && stratum_show_message(pool, val, params)) { ret = true; goto out; } out: if (val) json_decref(val); return ret; } extern bool parse_stratum_response(struct pool *, char *s); bool auth_stratum(struct pool *pool) { json_t *val = NULL, *res_val, *err_val; char s[RBUFSIZE], *sret = NULL; json_error_t err; bool ret = false; sprintf(s, "{\"id\": \"auth\", \"method\": \"mining.authorize\", \"params\": [\"%s\", \"%s\"]}", pool->rpc_user, pool->rpc_pass); if (!stratum_send(pool, s, strlen(s))) goto out; /* Parse all data in the queue and anything left should be auth */ while (42) { sret = recv_line(pool); if (!sret) goto out; if (parse_method(pool, sret)) free(sret); else break; } val = JSON_LOADS(sret, &err); free(sret); res_val = json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_false(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_WARNING, "pool %d JSON stratum auth failed: %s", pool->pool_no, ss); free(ss); goto out; } ret = true; applog(LOG_INFO, "Stratum authorisation success for pool %d", pool->pool_no); pool->probed = true; successful_connect = true; out: if (val) json_decref(val); if (pool->stratum_notify) stratum_probe_transparency(pool); return ret; } curl_socket_t grab_socket_opensocket_cb(void *clientp, __maybe_unused curlsocktype purpose, struct curl_sockaddr *addr) { struct pool *pool = clientp; curl_socket_t sck = socket(addr->family, addr->socktype, addr->protocol); pool->sock = sck; return sck; } static bool setup_stratum_curl(struct pool *pool) { char curl_err_str[CURL_ERROR_SIZE]; CURL *curl = NULL; char s[RBUFSIZE]; bool ret = false; applog(LOG_DEBUG, "initiate_stratum with sockbuf=%p", pool->sockbuf); mutex_lock(&pool->stratum_lock); timer_unset(&pool->swork.tv_transparency); pool->stratum_active = false; pool->stratum_notify = false; pool->swork.transparency_probed = false; if (pool->stratum_curl) curl_easy_cleanup(pool->stratum_curl); pool->stratum_curl = curl_easy_init(); if (unlikely(!pool->stratum_curl)) quithere(1, "Failed to curl_easy_init"); if (pool->sockbuf) pool->sockbuf[0] = '\0'; curl = pool->stratum_curl; if (!pool->sockbuf) { pool->sockbuf = calloc(RBUFSIZE, 1); if (!pool->sockbuf) quithere(1, "Failed to calloc pool sockbuf"); pool->sockbuf_size = RBUFSIZE; } /* Create a http url for use with curl */ sprintf(s, "http://%s:%s", pool->sockaddr_url, pool->stratum_port); curl_easy_setopt(curl, CURLOPT_FRESH_CONNECT, 1); curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 30); curl_easy_setopt(curl, CURLOPT_ERRORBUFFER, curl_err_str); curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); curl_easy_setopt(curl, CURLOPT_URL, s); if (!opt_delaynet) curl_easy_setopt(curl, CURLOPT_TCP_NODELAY, 1); /* We use DEBUGFUNCTION to count bytes sent/received, and verbose is needed * to enable it */ curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, curl_debug_cb); curl_easy_setopt(curl, CURLOPT_DEBUGDATA, (void *)pool); curl_easy_setopt(curl, CURLOPT_VERBOSE, 1); // CURLINFO_LASTSOCKET is broken on Win64 (which has a wider SOCKET type than curl_easy_getinfo returns), so we use this 
hack for now curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, grab_socket_opensocket_cb); curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, pool); curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_TRY); if (pool->rpc_proxy) { curl_easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1); curl_easy_setopt(curl, CURLOPT_PROXY, pool->rpc_proxy); } else if (opt_socks_proxy) { curl_easy_setopt(curl, CURLOPT_HTTPPROXYTUNNEL, 1); curl_easy_setopt(curl, CURLOPT_PROXY, opt_socks_proxy); curl_easy_setopt(curl, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5); } curl_easy_setopt(curl, CURLOPT_CONNECT_ONLY, 1); pool->sock = INVSOCK; if (curl_easy_perform(curl)) { applog(LOG_INFO, "Stratum connect failed to pool %d: %s", pool->pool_no, curl_err_str); errout: curl_easy_cleanup(curl); pool->stratum_curl = NULL; goto out; } if (pool->sock == INVSOCK) { applog(LOG_ERR, "Stratum connect succeeded, but technical problem extracting socket (pool %u)", pool->pool_no); goto errout; } keep_sockalive(pool->sock); pool->cgminer_pool_stats.times_sent++; pool->cgminer_pool_stats.times_received++; ret = true; out: mutex_unlock(&pool->stratum_lock); return ret; } static char *get_sessionid(json_t *val) { char *ret = NULL; json_t *arr_val; int arrsize, i; arr_val = json_array_get(val, 0); if (!arr_val || !json_is_array(arr_val)) goto out; arrsize = json_array_size(arr_val); for (i = 0; i < arrsize; i++) { json_t *arr = json_array_get(arr_val, i); const char *notify; if (!arr | !json_is_array(arr)) break; notify = __json_array_string(arr, 0); if (!notify) continue; if (!strncasecmp(notify, "mining.notify", 13)) { ret = json_array_string(arr, 1); break; } } out: return ret; } void suspend_stratum(struct pool *pool) { clear_sockbuf(pool); applog(LOG_INFO, "Closing socket for stratum pool %d", pool->pool_no); mutex_lock(&pool->stratum_lock); pool->stratum_active = pool->stratum_notify = false; if (pool->stratum_curl) { curl_easy_cleanup(pool->stratum_curl); } pool->stratum_curl = NULL; pool->sock = INVSOCK; mutex_unlock(&pool->stratum_lock); } bool initiate_stratum(struct pool *pool) { bool ret = false, recvd = false, noresume = false, sockd = false; bool trysuggest = request_target_str; char s[RBUFSIZE], *sret = NULL, *nonce1, *sessionid; json_t *val = NULL, *res_val, *err_val; json_error_t err; int n2size; resend: if (!setup_stratum_curl(pool)) { sockd = false; goto out; } sockd = true; clear_sock(pool); if (trysuggest) { int sz = sprintf(s, "{\"id\": null, \"method\": \"mining.suggest_target\", \"params\": [\"%s\"]}", request_target_str); if (!_stratum_send(pool, s, sz, true)) { applog(LOG_DEBUG, "Pool %u: Failed to send suggest_target in initiate_stratum", pool->pool_no); goto out; } recvd = true; } if (noresume) { sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": []}", swork_id++); } else { if (pool->sessionid) sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\", \"%s\"]}", swork_id++, pool->sessionid); else sprintf(s, "{\"id\": %d, \"method\": \"mining.subscribe\", \"params\": [\""PACKAGE"/"VERSION"\"]}", swork_id++); } if (!_stratum_send(pool, s, strlen(s), true)) { applog(LOG_DEBUG, "Failed to send s in initiate_stratum"); goto out; } recvd = true; if (!socket_full(pool, DEFAULT_SOCKWAIT)) { applog(LOG_DEBUG, "Timed out waiting for response in initiate_stratum"); goto out; } sret = recv_line(pool); if (!sret) goto out; val = JSON_LOADS(sret, &err); free(sret); if (!val) { applog(LOG_INFO, "JSON decode failed(%d): %s", err.line, err.text); goto out; } res_val = 
json_object_get(val, "result"); err_val = json_object_get(val, "error"); if (!res_val || json_is_null(res_val) || (err_val && !json_is_null(err_val))) { char *ss; if (err_val) ss = json_dumps(err_val, JSON_INDENT(3)); else ss = strdup("(unknown reason)"); applog(LOG_INFO, "JSON-RPC decode failed: %s", ss); free(ss); goto out; } sessionid = get_sessionid(res_val); if (!sessionid) applog(LOG_DEBUG, "Failed to get sessionid in initiate_stratum"); nonce1 = json_array_string(res_val, 1); if (!nonce1) { applog(LOG_INFO, "Failed to get nonce1 in initiate_stratum"); free(sessionid); goto out; } n2size = json_integer_value(json_array_get(res_val, 2)); if (!n2size) { applog(LOG_INFO, "Failed to get n2size in initiate_stratum"); free(sessionid); free(nonce1); goto out; } cg_wlock(&pool->data_lock); free(pool->sessionid); pool->sessionid = sessionid; free(pool->nonce1); pool->nonce1 = nonce1; pool->n1_len = strlen(nonce1) / 2; pool->n2size = n2size; pool->nonce2sz = (n2size > sizeof(pool->nonce2)) ? sizeof(pool->nonce2) : n2size; #ifdef WORDS_BIGENDIAN pool->nonce2off = (n2size < sizeof(pool->nonce2)) ? (sizeof(pool->nonce2) - n2size) : 0; #endif cg_wunlock(&pool->data_lock); if (sessionid) applog(LOG_DEBUG, "Pool %d stratum session id: %s", pool->pool_no, pool->sessionid); ret = true; out: if (val) { json_decref(val); val = NULL; } if (ret) { if (!pool->stratum_url) pool->stratum_url = pool->sockaddr_url; pool->stratum_active = true; pool->swork.diff = 1; if (opt_protocol) { applog(LOG_DEBUG, "Pool %d confirmed mining.subscribe with extranonce1 %s extran2size %d", pool->pool_no, pool->nonce1, pool->n2size); } } else { if (recvd) { if (trysuggest) { applog(LOG_DEBUG, "Pool %u: Failed to connect stratum with mining.suggest_target, retrying without", pool->pool_no); trysuggest = false; goto resend; } if (!noresume) { applog(LOG_DEBUG, "Failed to resume stratum, trying afresh"); noresume = true; goto resend; } } applog(LOG_DEBUG, "Initiate stratum failed"); if (sockd) suspend_stratum(pool); } return ret; } bool restart_stratum(struct pool *pool) { if (pool->stratum_active) suspend_stratum(pool); if (!initiate_stratum(pool)) return false; if (!auth_stratum(pool)) return false; return true; } void dev_error_update(struct cgpu_info *dev, enum dev_reason reason) { dev->device_last_not_well = time(NULL); cgtime(&dev->tv_device_last_not_well); dev->device_not_well_reason = reason; } void dev_error(struct cgpu_info *dev, enum dev_reason reason) { dev_error_update(dev, reason); switch (reason) { case REASON_THREAD_FAIL_INIT: dev->thread_fail_init_count++; break; case REASON_THREAD_ZERO_HASH: dev->thread_zero_hash_count++; break; case REASON_THREAD_FAIL_QUEUE: dev->thread_fail_queue_count++; break; case REASON_DEV_SICK_IDLE_60: dev->dev_sick_idle_60_count++; break; case REASON_DEV_DEAD_IDLE_600: dev->dev_dead_idle_600_count++; break; case REASON_DEV_NOSTART: dev->dev_nostart_count++; break; case REASON_DEV_OVER_HEAT: dev->dev_over_heat_count++; break; case REASON_DEV_THERMAL_CUTOFF: dev->dev_thermal_cutoff_count++; break; case REASON_DEV_COMMS_ERROR: dev->dev_comms_error_count++; break; case REASON_DEV_THROTTLE: dev->dev_throttle_count++; break; } } /* Realloc an existing string to fit an extra string s, appending s to it. 
*/ void *realloc_strcat(char *ptr, char *s) { size_t old = strlen(ptr), len = strlen(s); char *ret; if (!len) return ptr; len += old + 1; align_len(&len); ret = malloc(len); if (unlikely(!ret)) quithere(1, "Failed to malloc"); sprintf(ret, "%s%s", ptr, s); free(ptr); return ret; } static bool sanechars[] = { false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, true , true , true , true , true , true , true , true , true , true , false, false, false, false, false, false, false, true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , false, false, false, false, false, false, true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , true , false, false, false, false, false, }; char *sanestr(char *o, char *s) { char *rv = o; bool br = false; for ( ; s[0]; ++s) { if (sanechars[s[0] & 0x7f]) { if (br) { br = false; if (s[0] >= '0' && s[0] <= '9') (o++)[0] = '_'; } (o++)[0] = s[0]; } else if (o != s && o[-1] >= '0' && o[-1] <= '9') br = true; } o[0] = '\0'; return rv; } void RenameThread(const char* name) { #if defined(PR_SET_NAME) // Only the first 15 characters are used (16 - NUL terminator) prctl(PR_SET_NAME, name, 0, 0, 0); #elif defined(__APPLE__) pthread_setname_np(name); #elif (defined(__FreeBSD__) || defined(__OpenBSD__)) pthread_set_name_np(pthread_self(), name); #else // Prevent warnings for unused parameters... 
(void)name; #endif } static pthread_key_t key_bfgtls; struct bfgtls_data { char *bfg_strerror_result; size_t bfg_strerror_resultsz; #ifdef WIN32 LPSTR bfg_strerror_socketresult; #endif #ifdef NEED_BFG_LOWL_VCOM struct detectone_meta_info_t __detectone_meta_info; #endif }; static struct bfgtls_data *get_bfgtls() { struct bfgtls_data *bfgtls = pthread_getspecific(key_bfgtls); if (bfgtls) return bfgtls; void *p; bfgtls = malloc(sizeof(*bfgtls)); if (!bfgtls) quithere(1, "malloc bfgtls failed"); p = malloc(64); if (!p) quithere(1, "malloc bfg_strerror_result failed"); *bfgtls = (struct bfgtls_data){ .bfg_strerror_resultsz = 64, .bfg_strerror_result = p, }; if (pthread_setspecific(key_bfgtls, bfgtls)) quithere(1, "pthread_setspecific failed"); return bfgtls; } static void bfgtls_free(void * const p) { struct bfgtls_data * const bfgtls = p; free(bfgtls->bfg_strerror_result); #ifdef WIN32 if (bfgtls->bfg_strerror_socketresult) LocalFree(bfgtls->bfg_strerror_socketresult); #endif free(bfgtls); } #ifdef NEED_BFG_LOWL_VCOM struct detectone_meta_info_t *_detectone_meta_info() { return &get_bfgtls()->__detectone_meta_info; } #endif void bfg_init_threadlocal() { if (pthread_key_create(&key_bfgtls, bfgtls_free)) quithere(1, "pthread_key_create failed"); } static bool bfg_grow_buffer(char ** const bufp, size_t * const bufszp, size_t minimum) { if (minimum <= *bufszp) return false; while (minimum > *bufszp) *bufszp *= 2; *bufp = realloc(*bufp, *bufszp); if (unlikely(!*bufp)) quithere(1, "realloc failed"); return true; } static const char *bfg_strcpy_growing_buffer(char ** const bufp, size_t * const bufszp, const char *src) { if (!src) return NULL; const size_t srcsz = strlen(src) + 1; bfg_grow_buffer(bufp, bufszp, srcsz); memcpy(*bufp, src, srcsz); return *bufp; } // Guaranteed to always return some string (or quit) const char *bfg_strerror(int e, enum bfg_strerror_type type) { static __maybe_unused pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; struct bfgtls_data *bfgtls = get_bfgtls(); size_t * const bufszp = &bfgtls->bfg_strerror_resultsz; char ** const bufp = &bfgtls->bfg_strerror_result; const char *have = NULL; switch (type) { case BST_LIBUSB: // NOTE: Nested preprocessor checks since the latter isn't defined at all without the former #ifdef HAVE_LIBUSB # if HAVE_DECL_LIBUSB_ERROR_NAME // libusb makes no guarantees for thread-safety or persistence mutex_lock(&mutex); have = bfg_strcpy_growing_buffer(bufp, bufszp, libusb_error_name(e)); mutex_unlock(&mutex); # endif #endif break; case BST_SOCKET: case BST_SYSTEM: { #ifdef WIN32 // Windows has a different namespace for system and socket errors LPSTR *msg = &bfgtls->bfg_strerror_socketresult; if (*msg) LocalFree(*msg); if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, 0, e, 0, (LPSTR)msg, 0, 0)) { LPSTR msgp = *msg; size_t n = strlen(msgp); while (isCspace(msgp[--n])) msgp[n] = '\0'; return *msg; } *msg = NULL; break; #endif } // Fallthru on non-WIN32 case BST_ERRNO: { #ifdef __STRERROR_S_WORKS // FIXME: Not sure how to get this on MingW64 retry: if (likely(!strerror_s(*bufp, *bufszp, e))) { if (bfg_grow_buffer(bufp, bufszp, strlen(*bufp) + 2)) goto retry; return *bufp; } // TODO: XSI strerror_r // TODO: GNU strerror_r #else mutex_lock(&mutex); have = bfg_strcpy_growing_buffer(bufp, bufszp, strerror(e)); mutex_unlock(&mutex); #endif } } if (have) return *bufp; // Failback: Stringify the number static const char fmt[] = "%s error #%d", *typestr; switch (type) { case BST_ERRNO: typestr = "System"; break; case BST_SOCKET:
typestr = "Socket"; break; case BST_LIBUSB: typestr = "libusb"; break; default: typestr = "Unexpected"; } int sz = snprintf((char*)bfgtls, 0, fmt, typestr, e) + 1; bfg_grow_buffer(bufp, bufszp, sz); sprintf(*bufp, fmt, typestr, e); return *bufp; } void notifier_init(notifier_t pipefd) { #ifdef WIN32 #define WindowsErrorStr(e) bfg_strerror(e, BST_SOCKET) SOCKET listener, connecter, acceptor; listener = socket(AF_INET, SOCK_STREAM, 0); if (listener == INVALID_SOCKET) quit(1, "Failed to create listener socket"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); connecter = socket(AF_INET, SOCK_STREAM, 0); if (connecter == INVALID_SOCKET) quit(1, "Failed to create connect socket"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); struct sockaddr_in inaddr = { .sin_family = AF_INET, .sin_addr = { .s_addr = htonl(INADDR_LOOPBACK), }, .sin_port = 0, }; { static const int reuse = 1; setsockopt(listener, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuse, sizeof(reuse)); } if (bind(listener, (struct sockaddr*)&inaddr, sizeof(inaddr)) == SOCKET_ERROR) quit(1, "Failed to bind listener socket"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); socklen_t inaddr_sz = sizeof(inaddr); if (getsockname(listener, (struct sockaddr*)&inaddr, &inaddr_sz) == SOCKET_ERROR) quit(1, "Failed to getsockname"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); if (listen(listener, 1) == SOCKET_ERROR) quit(1, "Failed to listen"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); inaddr.sin_family = AF_INET; inaddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); if (connect(connecter, (struct sockaddr*)&inaddr, inaddr_sz) == SOCKET_ERROR) quit(1, "Failed to connect"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); acceptor = accept(listener, NULL, NULL); if (acceptor == INVALID_SOCKET) quit(1, "Failed to accept"IN_FMT_FFL": %s", __FILE__, __func__, __LINE__, WindowsErrorStr(WSAGetLastError())); closesocket(listener); pipefd[0] = connecter; pipefd[1] = acceptor; #else if (pipe(pipefd)) quithere(1, "Failed to create pipe"); #endif } void notifier_wake(notifier_t fd) { if (fd[1] == INVSOCK) return; if (1 != #ifdef WIN32 send(fd[1], "\0", 1, 0) #else write(fd[1], "\0", 1) #endif ) applog(LOG_WARNING, "Error trying to wake notifier"); } void notifier_read(notifier_t fd) { char buf[0x10]; #ifdef WIN32 IGNORE_RETURN_VALUE(recv(fd[0], buf, sizeof(buf), 0)); #else IGNORE_RETURN_VALUE(read(fd[0], buf, sizeof(buf))); #endif } void notifier_init_invalid(notifier_t fd) { fd[0] = fd[1] = INVSOCK; } void notifier_destroy(notifier_t fd) { #ifdef WIN32 closesocket(fd[0]); closesocket(fd[1]); #else close(fd[0]); close(fd[1]); #endif fd[0] = fd[1] = INVSOCK; } void _bytes_alloc_failure(size_t sz) { quit(1, "bytes_resize failed to allocate %lu bytes", (unsigned long)sz); } void *cmd_thread(void *cmdp) { const char *cmd = cmdp; applog(LOG_DEBUG, "Executing command: %s", cmd); int rc = system(cmd); if (rc) applog(LOG_WARNING, "Command returned %d exit code: %s", rc, cmd); return NULL; } void run_cmd(const char *cmd) { if (!cmd) return; pthread_t pth; pthread_create(&pth, NULL, cmd_thread, (void*)cmd); } uint8_t crc5usb(unsigned char *ptr, uint8_t len) { uint8_t i, j, k; uint8_t crc = 0x1f; uint8_t crcin[5] = {1, 1, 1, 1, 1}; uint8_t crcout[5] = {1, 1, 1, 1, 1}; uint8_t din = 0; j = 0x80; k = 0; for (i = 0; i < len; i++) { if (*ptr & j) din = 1; else din = 0; 
crcout[0] = crcin[4] ^ din; crcout[1] = crcin[0]; crcout[2] = crcin[1] ^ crcin[4] ^ din; crcout[3] = crcin[2]; crcout[4] = crcin[3]; j = j >> 1; k++; if (k == 8) { j = 0x80; k = 0; ptr++; } memcpy(crcin, crcout, 5); } crc = 0; if(crcin[4]) crc |= 0x10; if(crcin[3]) crc |= 0x08; if(crcin[2]) crc |= 0x04; if(crcin[1]) crc |= 0x02; if(crcin[0]) crc |= 0x01; return crc; } static uint8_t _crc8ccitt_table[0x100]; void bfg_init_checksums(void) { for (int i = 0; i < 0x100; ++i) { uint8_t crc = i; for (int j = 0; j < 8; ++j) crc = (crc << 1) ^ ((crc & 0x80) ? 7 : 0); _crc8ccitt_table[i] = crc & 0xff; } } uint8_t crc8ccitt(const void * const buf, const size_t buflen) { const uint8_t *p = buf; uint8_t crc = 0xff; for (int i = 0; i < buflen; ++i) crc = _crc8ccitt_table[crc ^ *p++]; return crc; } bfgminer-bfgminer-3.10.0/util.h000066400000000000000000000300061226556647300163200ustar00rootroot00000000000000/* * Copyright 2013 Luke Dashjr * Copyright 2012-2013 Con Kolivas * Copyright 2011 Andrew Smith * Copyright 2011 Jeff Garzik * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 3 of the License, or (at your option) * any later version. See COPYING for more details. */ #ifndef __UTIL_H__ #define __UTIL_H__ #include #include #include #include #include #include #include "compat.h" #define INVALID_TIMESTAMP ((time_t)-1) #if defined(unix) || defined(__APPLE__) #include #include #include #include #define SOCKETTYPE int #define SOCKETFAIL(a) ((a) < 0) #define INVSOCK -1 #define INVINETADDR -1 #define CLOSESOCKET close #define SOCKERR (errno) #define SOCKERRMSG bfg_strerror(errno, BST_SOCKET) static inline bool sock_blocks(void) { return (errno == EAGAIN || errno == EWOULDBLOCK); } #elif defined WIN32 #include #include #define SOCKETTYPE SOCKET #define SOCKETFAIL(a) ((int)(a) == SOCKET_ERROR) #define INVSOCK INVALID_SOCKET #define INVINETADDR INADDR_NONE #define CLOSESOCKET closesocket #define SOCKERR (WSAGetLastError()) #define SOCKERRMSG bfg_strerror(WSAGetLastError(), BST_SOCKET) static inline bool sock_blocks(void) { return (WSAGetLastError() == WSAEWOULDBLOCK); } #ifndef SHUT_RDWR #define SHUT_RDWR SD_BOTH #endif #ifndef in_addr_t #define in_addr_t uint32_t #endif #endif #define IGNORE_RETURN_VALUE(expr) {if(expr);}(void)0 #if JANSSON_MAJOR_VERSION >= 2 #define JSON_LOADS(str, err_ptr) json_loads((str), 0, (err_ptr)) #else #define JSON_LOADS(str, err_ptr) json_loads((str), (err_ptr)) #endif extern char *json_dumps_ANY(json_t *, size_t flags); static inline const char *bfg_json_obj_string(json_t *json, const char *key, const char *fail) { json = json_object_get(json, key); if (!json) return fail; return json_string_value(json) ?: fail; } extern const char *__json_array_string(json_t *, unsigned int entry); static inline bool isCspace(int c) { switch (c) { case ' ': case '\f': case '\n': case '\r': case '\t': case '\v': return true; default: return false; } } typedef struct timeval cgtimer_t; struct thr_info; struct pool; enum dev_reason; struct cgpu_info; extern void json_rpc_call_async(CURL *, const char *url, const char *userpass, const char *rpc_req, bool longpoll, struct pool *pool, bool share, void *priv); extern json_t *json_rpc_call_completed(CURL *, int rc, bool probe, int *rolltime, void *out_priv); extern char *absolute_uri(char *uri, const char *ref); // ref must be a root URI extern size_t ucs2_to_utf8(char *out, const uint16_t *in, size_t sz); extern char 
*ucs2_to_utf8_dup(uint16_t *in, size_t sz); #define BFGINIT(var, val) do{ \ if (!(var)) \ (var) = val; \ }while(0) extern void gen_hash(unsigned char *data, unsigned char *hash, int len); extern void hash_data(unsigned char *out_hash, const unsigned char *data); extern void real_block_target(unsigned char *target, const unsigned char *data); extern bool hash_target_check(const unsigned char *hash, const unsigned char *target); extern bool hash_target_check_v(const unsigned char *hash, const unsigned char *target); int thr_info_create(struct thr_info *thr, pthread_attr_t *attr, void *(*start) (void *), void *arg); void thr_info_freeze(struct thr_info *thr); void thr_info_cancel(struct thr_info *thr); void subtime(struct timeval *a, struct timeval *b); void addtime(struct timeval *a, struct timeval *b); bool time_more(struct timeval *a, struct timeval *b); bool time_less(struct timeval *a, struct timeval *b); void copy_time(struct timeval *dest, const struct timeval *src); void timespec_to_val(struct timeval *val, const struct timespec *spec); void timeval_to_spec(struct timespec *spec, const struct timeval *val); void us_to_timeval(struct timeval *val, int64_t us); void us_to_timespec(struct timespec *spec, int64_t us); void ms_to_timespec(struct timespec *spec, int64_t ms); void timeraddspec(struct timespec *a, const struct timespec *b); void cgsleep_ms(int ms); void cgsleep_us(int64_t us); #define cgtimer_time(ts_start) timer_set_now(ts_start) #define cgsleep_prepare_r(ts_start) cgtimer_time(ts_start) void cgsleep_ms_r(cgtimer_t *ts_start, int ms); void (*cgsleep_us_r)(cgtimer_t *ts_start, int64_t us); static inline int cgtimer_to_ms(cgtimer_t *cgt) { return (cgt->tv_sec * 1000) + (cgt->tv_usec / 1000); } #define cgtimer_sub(a, b, res) timersub(a, b, res) double us_tdiff(struct timeval *end, struct timeval *start); double tdiff(struct timeval *end, struct timeval *start); bool _stratum_send(struct pool *pool, char *s, ssize_t len, bool force); #define stratum_send(pool, s, len) _stratum_send(pool, s, len, false) bool sock_full(struct pool *pool); char *recv_line(struct pool *pool); bool parse_method(struct pool *pool, char *s); bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port); bool auth_stratum(struct pool *pool); bool initiate_stratum(struct pool *pool); bool restart_stratum(struct pool *pool); void suspend_stratum(struct pool *pool); extern void dev_error_update(struct cgpu_info *, enum dev_reason); void dev_error(struct cgpu_info *dev, enum dev_reason reason); void *realloc_strcat(char *ptr, char *s); extern char *sanestr(char *o, char *s); void RenameThread(const char* name); enum bfg_strerror_type { BST_ERRNO, BST_SOCKET, BST_LIBUSB, BST_SYSTEM, }; extern const char *bfg_strerror(int, enum bfg_strerror_type); typedef SOCKETTYPE notifier_t[2]; extern void notifier_init(notifier_t); extern void notifier_wake(notifier_t); extern void notifier_read(notifier_t); extern void notifier_init_invalid(notifier_t); extern void notifier_destroy(notifier_t); /* Align a size_t to 4 byte boundaries for fussy arches */ static inline void align_len(size_t *len) { if (*len % 4) *len += 4 - (*len % 4); } typedef struct bytes_t { uint8_t *buf; size_t sz; size_t allocsz; } bytes_t; #define BYTES_INIT ((bytes_t){.buf=NULL,}) static inline void bytes_init(bytes_t *b) { *b = BYTES_INIT; } // This can't be inline without ugly const/non-const issues #define bytes_buf(b) ((b)->buf) static inline size_t bytes_len(const bytes_t *b) { return b->sz; } static inline ssize_t 
bytes_find(const bytes_t * const b, const uint8_t needle) { const size_t blen = bytes_len(b); const uint8_t * const buf = bytes_buf(b); for (int i = 0; i < blen; ++i) if (buf[i] == needle) return i; return -1; } extern void _bytes_alloc_failure(size_t); static inline void bytes_extend_buf(bytes_t * const b, const size_t newsz) { if (newsz <= b->allocsz) return; if (!b->allocsz) b->allocsz = 0x10; do { b->allocsz *= 2; } while (newsz > b->allocsz); b->buf = realloc(b->buf, b->allocsz); if (!b->buf) _bytes_alloc_failure(b->allocsz); } static inline void bytes_resize(bytes_t * const b, const size_t newsz) { bytes_extend_buf(b, newsz);; b->sz = newsz; } static inline void *bytes_preappend(bytes_t * const b, const size_t addsz) { size_t origsz = bytes_len(b); bytes_extend_buf(b, origsz + addsz); return &bytes_buf(b)[origsz]; } static inline void bytes_postappend(bytes_t * const b, const size_t addsz) { size_t origsz = bytes_len(b); bytes_resize(b, origsz + addsz); } static inline void bytes_append(bytes_t * const b, const void * const add, const size_t addsz) { void * const appendbuf = bytes_preappend(b, addsz); memcpy(appendbuf, add, addsz); bytes_postappend(b, addsz); } static inline void bytes_cat(bytes_t *b, const bytes_t *cat) { bytes_append(b, bytes_buf(cat), bytes_len(cat)); } static inline void bytes_cpy(bytes_t *dst, const bytes_t *src) { dst->sz = src->sz; if (!dst->sz) { dst->allocsz = 0; dst->buf = NULL; return; } dst->allocsz = src->allocsz; size_t half; while (dst->sz <= (half = dst->allocsz / 2)) dst->allocsz = half; dst->buf = malloc(dst->allocsz); memcpy(dst->buf, src->buf, dst->sz); } static inline void bytes_shift(bytes_t *b, size_t shift) { if (shift >= b->sz) { b->sz = 0; return; } b->sz -= shift; memmove(bytes_buf(b), &bytes_buf(b)[shift], bytes_len(b)); } static inline void bytes_reset(bytes_t *b) { b->sz = 0; } static inline void bytes_nullterminate(bytes_t *b) { bytes_append(b, "", 1); --b->sz; } static inline void bytes_free(bytes_t *b) { free(b->buf); b->sz = b->allocsz = 0; } static inline void set_maxfd(int *p_maxfd, int fd) { if (fd > *p_maxfd) *p_maxfd = fd; } static inline void timer_unset(struct timeval *tvp) { tvp->tv_sec = -1; } static inline bool timer_isset(const struct timeval *tvp) { return tvp->tv_sec != -1; } extern void (*timer_set_now)(struct timeval *); #define cgtime(tvp) timer_set_now(tvp) #define TIMEVAL_USECS(usecs) ( \ (struct timeval){ \ .tv_sec = (usecs) / 1000000, \ .tv_usec = (usecs) % 1000000, \ } \ ) static inline long timeval_to_us(const struct timeval *tvp) { return ((long)tvp->tv_sec * 1000000) + tvp->tv_usec; } #define timer_set_delay(tvp_timer, tvp_now, usecs) do { \ struct timeval tv_add = TIMEVAL_USECS(usecs); \ timeradd(&tv_add, tvp_now, tvp_timer); \ } while(0) #define timer_set_delay_from_now(tvp_timer, usecs) do { \ struct timeval tv_now; \ timer_set_now(&tv_now); \ timer_set_delay(tvp_timer, &tv_now, usecs); \ } while(0) static inline const struct timeval *_bfg_nullisnow(const struct timeval *tvp, struct timeval *tvp_buf) { if (tvp) return tvp; cgtime(tvp_buf); return tvp_buf; } static inline long timer_elapsed_us(const struct timeval *tvp_timer, const struct timeval *tvp_now) { struct timeval tv; const struct timeval *_tvp_now = _bfg_nullisnow(tvp_now, &tv); timersub(_tvp_now, tvp_timer, &tv); return timeval_to_us(&tv); } #define ms_tdiff(end, start) (timer_elapsed_us(start, end) / 1000) static inline int timer_elapsed(const struct timeval *tvp_timer, const struct timeval *tvp_now) { struct timeval tv; const struct timeval 
*_tvp_now = _bfg_nullisnow(tvp_now, &tv); timersub(_tvp_now, tvp_timer, &tv); return tv.tv_sec; } static inline bool timer_passed(const struct timeval *tvp_timer, const struct timeval *tvp_now) { if (!timer_isset(tvp_timer)) return false; struct timeval tv; const struct timeval *_tvp_now = _bfg_nullisnow(tvp_now, &tv); return timercmp(tvp_timer, _tvp_now, <); } #if defined(WIN32) && !defined(HAVE_POOR_GETTIMEOFDAY) #define HAVE_POOR_GETTIMEOFDAY #endif #ifdef HAVE_POOR_GETTIMEOFDAY extern void bfg_gettimeofday(struct timeval *); #else #define bfg_gettimeofday(out) gettimeofday(out, NULL) #endif static inline void reduce_timeout_to(struct timeval *tvp_timeout, struct timeval *tvp_time) { if (!timer_isset(tvp_time)) return; if ((!timer_isset(tvp_timeout)) || timercmp(tvp_time, tvp_timeout, <)) *tvp_timeout = *tvp_time; } static inline struct timeval *select_timeout(struct timeval *tvp_timeout, struct timeval *tvp_now) { if (!timer_isset(tvp_timeout)) return NULL; if (timercmp(tvp_timeout, tvp_now, <)) timerclear(tvp_timeout); else timersub(tvp_timeout, tvp_now, tvp_timeout); return tvp_timeout; } #define _SNP2(fn, ...) do{ \ int __n42 = fn(s, sz, __VA_ARGS__); \ s += __n42; \ sz = (sz <= __n42) ? 0 : (sz - __n42); \ rv += __n42; \ }while(0) #define _SNP(...) _SNP2(snprintf, __VA_ARGS__) #define REPLACEMENT_CHAR (0xFFFD) #define U8_DEGREE "\xc2\xb0" #define U8_HLINE "\xe2\x94\x80" #define U8_BTEE "\xe2\x94\xb4" extern int32_t utf8_decode(const void *, int *out_len); extern void utf8_test(); #define RUNONCE(rv) do { \ static bool _runonce = false; \ if (_runonce) \ return rv; \ _runonce = true; \ } while(0) static inline char *maybe_strdup(const char *s) { return s ? strdup(s) : NULL; } static inline void maybe_strdup_if_null(const char **p, const char *s) { if (!*p) *p = maybe_strdup(s); } extern void run_cmd(const char *cmd); extern uint8_t crc5usb(unsigned char *ptr, uint8_t len); extern void bfg_init_checksums(void); extern uint8_t crc8ccitt(const void *, size_t); #endif /* __UTIL_H__ */ bfgminer-bfgminer-3.10.0/warn-on-use.h000066400000000000000000000120161226556647300175170ustar00rootroot00000000000000/* A C macro for emitting warnings if a function is used. Copyright (C) 2010-2011 Free Software Foundation, Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . */ /* _GL_WARN_ON_USE (function, "literal string") issues a declaration for FUNCTION which will then trigger a compiler warning containing the text of "literal string" anywhere that function is called, if supported by the compiler. If the compiler does not support this feature, the macro expands to an unused extern declaration. This macro is useful for marking a function as a potential portability trap, with the intent that "literal string" include instructions on the replacement function that should be used instead. However, one of the reasons that a function is a portability trap is if it has the wrong signature. 
Declaring FUNCTION with a different signature in C is a compilation error, so this macro must use the same type as any existing declaration so that programs that avoid the problematic FUNCTION do not fail to compile merely because they included a header that poisoned the function. But this implies that _GL_WARN_ON_USE is only safe to use if FUNCTION is known to already have a declaration. Use of this macro implies that there must not be any other macro hiding the declaration of FUNCTION; but undefining FUNCTION first is part of the poisoning process anyway (although for symbols that are provided only via a macro, the result is a compilation error rather than a warning containing "literal string"). Also note that in C++, it is only safe to use if FUNCTION has no overloads. For an example, it is possible to poison 'getline' by: - adding a call to gl_WARN_ON_USE_PREPARE([[#include ]], [getline]) in configure.ac, which potentially defines HAVE_RAW_DECL_GETLINE - adding this code to a header that wraps the system : #undef getline #if HAVE_RAW_DECL_GETLINE _GL_WARN_ON_USE (getline, "getline is required by POSIX 2008, but" "not universally present; use the gnulib module getline"); #endif It is not possible to directly poison global variables. But it is possible to write a wrapper accessor function, and poison that (less common usage, like &environ, will cause a compilation error rather than issue the nice warning, but the end result of informing the developer about their portability problem is still achieved): #if HAVE_RAW_DECL_ENVIRON static inline char ***rpl_environ (void) { return &environ; } _GL_WARN_ON_USE (rpl_environ, "environ is not always properly declared"); # undef environ # define environ (*rpl_environ ()) #endif */ #ifndef _GL_WARN_ON_USE # if 4 < __GNUC__ || (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) /* A compiler attribute is available in gcc versions 4.3.0 and later. */ # define _GL_WARN_ON_USE(function, message) \ extern __typeof__ (function) function __attribute__ ((__warning__ (message))) # elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING /* Verify the existence of the function. */ # define _GL_WARN_ON_USE(function, message) \ extern __typeof__ (function) function # else /* Unsupported. */ # define _GL_WARN_ON_USE(function, message) \ _GL_WARN_EXTERN_C int _gl_warn_on_use # endif #endif /* _GL_WARN_ON_USE_CXX (function, rettype, parameters_and_attributes, "string") is like _GL_WARN_ON_USE (function, "string"), except that the function is declared with the given prototype, consisting of return type, parameters, and attributes. This variant is useful for overloaded functions in C++. _GL_WARN_ON_USE does not work in this case. */ #ifndef _GL_WARN_ON_USE_CXX # if 4 < __GNUC__ || (__GNUC__ == 4 && 3 <= __GNUC_MINOR__) # define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ extern rettype function parameters_and_attributes \ __attribute__ ((__warning__ (msg))) # elif __GNUC__ >= 3 && GNULIB_STRICT_CHECKING /* Verify the existence of the function. */ # define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ extern rettype function parameters_and_attributes # else /* Unsupported. */ # define _GL_WARN_ON_USE_CXX(function,rettype,parameters_and_attributes,msg) \ _GL_WARN_EXTERN_C int _gl_warn_on_use # endif #endif /* _GL_WARN_EXTERN_C declaration; performs the declaration with C linkage. 
*/ #ifndef _GL_WARN_EXTERN_C # if defined __cplusplus # define _GL_WARN_EXTERN_C extern "C" # else # define _GL_WARN_EXTERN_C extern # endif #endif bfgminer-bfgminer-3.10.0/windows-build.txt000066400000000000000000000165321226556647300205320ustar00rootroot00000000000000******************* ** Install MinGW ** ******************* 1. Go to https://sourceforge.net/downloads/mingw 2. Download and execute mingw-get-setup.exe 3. Complete the installation wizard leaving default values ********************* ** Configure MinGW ** ********************* 1. Launch the MinGW Installation Manager 2. Select Basic Setup on the left 3. Mark the following packages for installation: mingw-developer-toolkit, mingw32-base, mingw-gcc-g++, and msys-base 4. Select All Packages on the left 5. Mark the following packages for installation: mingw32-pthreads-w32 (dev), mingw32-libpdcurses (dev), mingw32-pdcurses (bin), msys-libopenssl (dev) 6. Click Installation > Apply Changes 7. Click Apply 8. Wait... 9. Click Close 10. Close MinGW Installation Manager ************************ ** MinGW Post-Install ** ************************ 1. Check your Start menu for “MinGW Shell” 2. If the shortcut exists, continue to the next section, otherwise: 3. Navigate to C:\MinGW\msys\1.0\postinstall 4. Run pi.bat & answer the questions 5. Navigate to C:\MinGW\msys\1.0 6. Right-click msys.bat and click Copy 7. Click the Start button 8. Right-click All Programs and click Open 9. Right-click on the opened folder and click “Paste shortcut” 10. Right-click the new shortcut and click Properties 11. On the General tab, rename the shortcut to “MinGW Shell” 12. On the Shortcut tab, change Start in to C:\MinGW\msys\1.0\bin 13. Click OK ****************************************** ** Install YASM (optional - CPU mining) ** ****************************************** 1. Visit http://yasm.tortall.net/Download.html 2. Download the Win32 or Win64 .exe, depending on your version of Windows 3. Rename the downloaded executable to yasm.exe 4. Copy the executable to C:\MinGW\bin ******************** ** Install uthash ** ******************** 1. Visit http://troydhanson.github.io/uthash/ 2. Download the uthash-master.zip file 3. Extract the contents of the uthash-master/src folder into C:\MinGW\include ****************************** ** Install GTK+ for Windows ** ****************************** 1. Visit http://sourceforge.net/projects/gtk-win/ 2. Download and execute the gtk2-runtime installer 3. Complete the installation wizard leaving default values 4. Copy libglib-2.0-0.dll from C:\Program Files\GTK2-Runtime\bin to C:\MinGW\bin ************************ ** Install pkg-config ** ************************ 1. Visit http://www.gtk.org/download/win32.php 2. Search for a version of pkg-config that includes both the Tool and Dev downloads 3. Click and download both the Tool link and the Dev link 4. Open the pkg-config zip file and extract the bin folder to C:\MinGW 5. Open the pkg-config-dev zip file and extract the share folder to C:\MinGW ********************* ** Install libcurl ** ********************* 1. Visit http://curl.haxx.se/download.html#Win32 2. Look for the Win32 - Generic heading 3. Download the link that indicates both *libcurl* (not just binary) and *SSL* 4. Open the zip file and extract the lib, include, and bin folders to C:\MinGW 5. Edit C:\MinGW\lib\pkgconfig\libcurl.pc 6. Change "-lcurl" to "-lcurl -lcurldll" ************************ ** Install libjansson ** ************************ 1. Visit http://www.digip.org/jansson/releases/ 2. 
Download the latest .tar.gz file (not doc.tar.gz) 3. Open the .tar.gz file and extract the jansson folder to C:\MinGW\msys\1.0\home\USER (where USER is your user name) 4. Click Start and launch MinGW Shell 5. Type the following (replace X.X with actual version): cd ~/jansson-X.X ./configure --prefix=/MinGW make make check make install ******************** ** Install libusb ** ******************** 1. Visit http://git.libusb.org/?p=libusb.git;a=snapshot;h=master;sf=zip 2. Download and open the resulting zip file 3. Extract the libusb-master folder to C:\MinGW\msys\1.0\home\USER 4. Return to the MinGW Shell 5. Type the following (replace XYZ with actual identifier): cd ~/libusb-master-XYZ ./autogen.sh --disable-debug-log --prefix=/MinGW make make install ****************************************************** ** Install libmicrohttpd (optional - Stratum Proxy) ** ****************************************************** 1. Visit http://ftp.gnu.org/gnu/libmicrohttpd/ 2. Download the latest w32.zip file 3. Open the zip file and extract the share, lib, include, and bin folders to C:\MinGW ************************************************* ** Install libevent (optional - Stratum Proxy) ** ************************************************* 1. Visit http://libevent.org/ 2. Download the latest stable.tar.gz file 3. Open the .tar.gz file and extract the libevent-X.Y.Z-stable folder to C:\MinGW\msys\1.0\home\USER 4. Return to the MinGW Shell 5. Type the following (replace X.Y.Z with actual version): cd ~/libevent-X.Y.Z-stable ./configure --disable-openssl --prefix=/MinGW make make install ******************************************************* ** Install HIDAPI (optional - Hashbuster & Nanofury) ** ******************************************************* 1. Visit https://github.com/signal11/hidapi 2. Click Releases and download the latest zip file 3. Open the zip file and extract the hidapi-hidapi folder to C:\MinGW\msys\1.0\home\USER 4. Return to the MinGW Shell 5. Type the following (replace X.Y.Z with actual version): cd ~/hidapi-hidapi-X.Y.Z ./bootstrap ./configure --prefix=/MinGW make make install ***************** ** Install Git ** ***************** 1. Visit http://git-scm.com/downloads 2. Click the Download for Windows 3. Run the resulting installer once downloaded 4. Complete the installation wizard leaving default values **************************** ** Configure Git in MinGW ** **************************** 1. Navigate to C:\MinGW\msys\1.0\home\USER 2. Create a new text file with Notepad called profile.txt with the contents: PATH=$PATH:/c/Program\ Files/Git/bin 3. Return to the MinGW Shell 4. Type the following: mv ~/profile.txt ~/.profile 5. Restart the MinGW Shell ****************** ** OS Header(s) ** ****************** 1. Navigate to C:\MinGW\include 2. Use Notepad to create a new file called mstcpip.h (not .txt) with the contents: struct tcp_keepalive { u_long onoff; u_long keepalivetime; u_long keepaliveinterval; }; #ifndef USE_WS_PREFIX #define SIO_KEEPALIVE_VALS _WSAIOW(IOC_VENDOR, 4) #else #define WS_SIO_KEEPALIVE_VALS _WSAIOW(WS_IOC_VENDOR, 4) #endif ********************** ** Compile bfgminer ** ********************** 1. Return to the MinGW Shell 2. 
Type the following: cd ~/ git clone git://github.com/luke-jr/bfgminer.git cd bfgminer/ ./autogen.sh ./configure make * Note: see the README for bfgminer ./configure options ********************** ** Package Binaries ** ********************** Create a new folder anywhere and copy the following items there: C:\MinGW\msys\1.0\home\USER\bfgminer bfgminer.exe bfgminer-rpc.exe *.cl COPYING LICENSE README* C:\MinGW\msys\1.0\home\USER\bfgminer\libblkmaker\.libs libblkmaker-0.1-0.dll libblkmaker_jansson-0.1-0.dll C:\MinGW\bin libcurl.dll libjansson-4.dll libmicrohttpd-10.dll libpdcursesw.dll pthreadGC2.dll libusb-1.0.dll libidn-11.dll libeay32.dll ssleay32.dll libgcc_s_dw2-1.dll libgnutls-28.dll libgcrypt-11.dll libplibc-1.dll libgmp.dll libintl-8.dll libgpg-error-0.dll libiconv-2.dll libevent-2-0-5.dll zlib1.dll bfgminer-bfgminer-3.10.0/x86_32/000077500000000000000000000000001226556647300161245ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/x86_32/.gitignore000066400000000000000000000000131226556647300201060ustar00rootroot00000000000000libx8632.a bfgminer-bfgminer-3.10.0/x86_32/Makefile.am000066400000000000000000000001701226556647300201560ustar00rootroot00000000000000noinst_LIBRARIES = libx8632.a SUFFIXES = .asm libx8632_a_SOURCES = sha256_xmm.asm .asm.o: $(YASM) -f $(YASM_FMT) $< bfgminer-bfgminer-3.10.0/x86_32/sha256_xmm.asm000066400000000000000000000162621226556647300205260ustar00rootroot00000000000000;; SHA-256 for X86 for Linux, based off of:A ; (c) Ufasoft 2011 http://ufasoft.com mailto:support@ufasoft.com ; Version 2011 ; This software is Public Domain ; SHA-256 CPU SSE cruncher for Bitcoin Miner ALIGN 32 BITS 32 %define hash ecx %define data edx %define init esi ; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16)) %define LAB_CALC_PARA 2 %define LAB_CALC_UNROLL 24 %define LAB_LOOP_UNROLL 64 extern sha256_consts_m128i global CalcSha256_x86 ; CalcSha256 hash(ecx), data(edx), init([esp+4]) CalcSha256_x86: push esi push edi mov init, [esp+12] LAB_SHA: lea edi, qword [data+256] ; + 256 LAB_CALC: %macro lab_calc_blk 1 movdqa xmm0, [edi-(15-%1)*16] ; xmm0 = W[I-15] movdqa xmm4, [edi-(15-(%1+1))*16] ; xmm4 = W[I-15+1] movdqa xmm2, xmm0 ; xmm2 = W[I-15] movdqa xmm6, xmm4 ; xmm6 = W[I-15+1] psrld xmm0, 3 ; xmm0 = W[I-15] >> 3 psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3 movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3 movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3 pslld xmm2, 14 ; xmm2 = W[I-15] << 14 pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14 psrld xmm1, 4 ; xmm1 = W[I-15] >> 7 psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7 pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) psrld xmm1, 11 ; xmm1 = W[I-15] >> 18 psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18 pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) pslld xmm2, 11 ; xmm2 = W[I-15] << 25 pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25 pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25) pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25) movdqa xmm3, [edi-(2-%1)*16] ; xmm3 = W[I-2] movdqa xmm7, [edi-(2-(%1+1))*16] ; xmm7 = W[I-2+1] paddd xmm0, [edi-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] 
paddd xmm4, [edi-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] ;;;;;;;;;;;;;;;;;; movdqa xmm2, xmm3 ; xmm2 = W[I-2] movdqa xmm6, xmm7 ; xmm6 = W[I-2+1] psrld xmm3, 10 ; xmm3 = W[I-2] >> 10 psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10 movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10 movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10 paddd xmm0, [edi-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7] pslld xmm2, 13 ; xmm2 = W[I-2] << 13 pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13 psrld xmm1, 7 ; xmm1 = W[I-2] >> 17 psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17 paddd xmm4, [edi-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1] pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) psrld xmm1, 2 ; xmm1 = W[I-2] >> 19 psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19 pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) pslld xmm2, 2 ; xmm2 = W[I-2] << 15 pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15 pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15) pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15) paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7] paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1] movdqa [edi+(%1*16)], xmm0 movdqa [edi+((%1+1)*16)], xmm4 %endmacro %assign i 0 %rep LAB_CALC_UNROLL lab_calc_blk i %assign i i+LAB_CALC_PARA %endrep ; Load the init values of the message into the hash. 
movdqa xmm7, [init] pshufd xmm5, xmm7, 0x55 ; xmm5 == b pshufd xmm4, xmm7, 0xAA ; xmm4 == c pshufd xmm3, xmm7, 0xFF ; xmm3 == d pshufd xmm7, xmm7, 0 ; xmm7 == a movdqa xmm0, [init+4*4] pshufd xmm1, xmm0, 0x55 ; [hash+0*16] == f movdqa [hash+0*16], xmm1 pshufd xmm1, xmm0, 0xAA ; [hash+1*16] == g movdqa [hash+1*16], xmm1 pshufd xmm1, xmm0, 0xFF ; [hash+2*16] == h movdqa [hash+2*16], xmm1 pshufd xmm0, xmm0, 0 ; xmm0 == e LAB_LOOP: ;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32(g_sha256_k[j]) + w[j] %macro lab_loop_blk 1 movdqa xmm6, [data+%1] paddd xmm6, sha256_consts_m128i[%1] paddd xmm6, [hash+2*16] ; +h movdqa xmm1, xmm0 movdqa xmm2, [hash+1*16] pandn xmm1, xmm2 ; ~e & g movdqa [hash+2*16], xmm2 ; h = g movdqa xmm2, [hash+0*16] ; f movdqa [hash+1*16], xmm2 ; g = f pand xmm2, xmm0 ; e & f pxor xmm1, xmm2 ; (e & f) ^ (~e & g) movdqa [hash+0*16], xmm0 ; f = e paddd xmm6, xmm1 ; Ch + h + w[i] + k[i] movdqa xmm1, xmm0 psrld xmm0, 6 movdqa xmm2, xmm0 pslld xmm1, 7 psrld xmm2, 5 pxor xmm0, xmm1 pxor xmm0, xmm2 pslld xmm1, 14 psrld xmm2, 14 pxor xmm0, xmm1 pxor xmm0, xmm2 pslld xmm1, 5 pxor xmm0, xmm1 ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25) paddd xmm6, xmm0 ; xmm6 = t1 movdqa xmm0, xmm3 ; d paddd xmm0, xmm6 ; e = d+t1 movdqa xmm1, xmm5 ; =b movdqa xmm3, xmm4 ; d = c movdqa xmm2, xmm4 ; c pand xmm2, xmm5 ; b & c pand xmm4, xmm7 ; a & c pand xmm1, xmm7 ; a & b pxor xmm1, xmm4 movdqa xmm4, xmm5 ; c = b movdqa xmm5, xmm7 ; b = a pxor xmm1, xmm2 ; (a & c) ^ (a & d) ^ (c & d) paddd xmm6, xmm1 ; t1 + ((a & c) ^ (a & d) ^ (c & d)) movdqa xmm2, xmm7 psrld xmm7, 2 movdqa xmm1, xmm7 pslld xmm2, 10 psrld xmm1, 11 pxor xmm7, xmm2 pxor xmm7, xmm1 pslld xmm2, 9 psrld xmm1, 9 pxor xmm7, xmm2 pxor xmm7, xmm1 pslld xmm2, 11 pxor xmm7, xmm2 paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d)); %endmacro %assign i 0 %rep LAB_LOOP_UNROLL lab_loop_blk i %assign i i+16 %endrep ; Finished the 64 rounds, calculate hash and save movdqa xmm1, [init+16] pshufd xmm2, xmm1, 0xFF movdqa xmm6, [hash+2*16] paddd xmm2, xmm6 movdqa [hash+7*16], xmm2 pshufd xmm2, xmm1, 0xAA movdqa xmm6, [hash+1*16] paddd xmm2, xmm6 movdqa [hash+6*16], xmm2 pshufd xmm2, xmm1, 0x55 movdqa xmm6, [hash+0*16] paddd xmm2, xmm6 movdqa [hash+5*16], xmm2 pshufd xmm1, xmm1, 0 paddd xmm0, xmm1 movdqa [hash+4*16], xmm0 movdqa xmm1, [init] pshufd xmm2, xmm1, 0xFF paddd xmm3, xmm2 movdqa [hash+3*16], xmm3 pshufd xmm2, xmm1, 0xAA paddd xmm4, xmm2 movdqa [hash+2*16], xmm4 pshufd xmm2, xmm1, 0x55 paddd xmm5, xmm2 movdqa [hash+1*16], xmm5 pshufd xmm1, xmm1, 0 paddd xmm7, xmm1 movdqa [hash+0*16], xmm7 LAB_RET: pop edi pop esi retn 4 %ifidn __OUTPUT_FORMAT__,elf section .note.GNU-stack noalloc noexec nowrite progbits %endif %ifidn __OUTPUT_FORMAT__,elf32 section .note.GNU-stack noalloc noexec nowrite progbits %endif bfgminer-bfgminer-3.10.0/x86_64/000077500000000000000000000000001226556647300161315ustar00rootroot00000000000000bfgminer-bfgminer-3.10.0/x86_64/.gitignore000066400000000000000000000000131226556647300201130ustar00rootroot00000000000000libx8664.a bfgminer-bfgminer-3.10.0/x86_64/Makefile.am000066400000000000000000000002321226556647300201620ustar00rootroot00000000000000noinst_LIBRARIES = libx8664.a SUFFIXES = .asm libx8664_a_SOURCES = sha256_xmm_amd64.asm sha256_sse4_amd64.asm .asm.o: $(YASM) -f $(YASM_FMT) -o $@ $< bfgminer-bfgminer-3.10.0/x86_64/sha256_sse4_amd64.asm000066400000000000000000000176471226556647300216130ustar00rootroot00000000000000;; 
SHA-256 for X86-64 for Linux, based off of: ; (c) Ufasoft 2011 http://ufasoft.com mailto:support@ufasoft.com ; Version 2011 ; This software is Public Domain ; Significant re-write/optimisation and reordering by, ; Neil Kettle ; ~18% performance improvement ; SHA-256 CPU SSE cruncher for Bitcoin Miner ALIGN 32 BITS 64 %ifidn __OUTPUT_FORMAT__,win64 %define hash rcx %define data rdx %define init r8 %define temp r9 %else %define hash rdi %define data rsi %define init rdx %define temp rcx %endif ; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16)) %define LAB_CALC_PARA 2 %define LAB_CALC_UNROLL 8 %define LAB_LOOP_UNROLL 8 extern g_4sha256_k global CalcSha256_x64_sse4 ; CalcSha256 hash(rdi), data(rsi), init(rdx) ; CalcSha256 hash(rcx), data(rdx), init(r8) CalcSha256_x64_sse4: push rbx %ifidn __OUTPUT_FORMAT__,win64 sub rsp, 16 * 6 movdqa [rsp + 16*0], xmm6 movdqa [rsp + 16*1], xmm7 movdqa [rsp + 16*2], xmm8 movdqa [rsp + 16*3], xmm9 movdqa [rsp + 16*4], xmm10 movdqa [rsp + 16*5], xmm11 %endif LAB_NEXT_NONCE: mov temp, 64*4 ; 256 - temp is # of SHA-2 rounds mov rax, 16*4 ; 64 - rax is where we expand to LAB_SHA: push temp lea temp, qword [data+temp*4] ; + 1024 lea r11, qword [data+rax*4] ; + 256 LAB_CALC: %macro lab_calc_blk 1 movntdqa xmm0, [r11-(15-%1)*16] ; xmm0 = W[I-15] movdqa xmm2, xmm0 ; xmm2 = W[I-15] movntdqa xmm4, [r11-(15-(%1+1))*16] ; xmm4 = W[I-15+1] movdqa xmm6, xmm4 ; xmm6 = W[I-15+1] psrld xmm0, 3 ; xmm0 = W[I-15] >> 3 movdqa xmm1, xmm0 ; xmm1 = W[I-15] >> 3 pslld xmm2, 14 ; xmm2 = W[I-15] << 14 psrld xmm4, 3 ; xmm4 = W[I-15+1] >> 3 movdqa xmm5, xmm4 ; xmm5 = W[I-15+1] >> 3 psrld xmm5, 4 ; xmm5 = W[I-15+1] >> 7 pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) pslld xmm6, 14 ; xmm6 = W[I-15+1] << 14 psrld xmm1, 4 ; xmm1 = W[I-15] >> 7 pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) psrld xmm1, 11 ; xmm1 = W[I-15] >> 18 psrld xmm5, 11 ; xmm5 = W[I-15+1] >> 18 pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) pxor xmm4, xmm5 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) pslld xmm2, 11 ; xmm2 = W[I-15] << 25 pslld xmm6, 11 ; xmm6 = W[I-15+1] << 25 pxor xmm4, xmm6 ; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25) pxor xmm0, xmm1 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) pxor xmm0, xmm2 ; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25) paddd xmm0, [r11-(16-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] paddd xmm4, [r11-(16-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] movntdqa xmm3, [r11-(2-%1)*16] ; xmm3 = W[I-2] movntdqa xmm7, [r11-(2-(%1+1))*16] ; xmm7 = W[I-2+1] ;;;;;;;;;;;;;;;;;; movdqa xmm2, xmm3 ; xmm2 = W[I-2] psrld xmm3, 10 ; xmm3 = W[I-2] >> 10 movdqa xmm1, xmm3 ; xmm1 = W[I-2] >> 10 movdqa xmm6, xmm7 ; xmm6 = W[I-2+1] psrld xmm7, 10 ; xmm7 = W[I-2+1] >> 10 movdqa xmm5, xmm7 ; xmm5 = W[I-2+1] >> 10 paddd xmm0, [r11-(7-%1)*16] ; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7] paddd xmm4, [r11-(7-(%1+1))*16] ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1] pslld xmm2, 13 ; xmm2 = W[I-2] << 13 pslld xmm6, 13 ; xmm6 = W[I-2+1] << 13 psrld xmm1, 7 ; xmm1 = W[I-2] >> 17 psrld xmm5, 7 ; xmm5 = W[I-2+1] >> 17 pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) psrld xmm1, 2 ; xmm1 = W[I-2] >> 19 pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) pslld xmm2, 2 ; xmm2 = 
W[I-2] << 15 pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) psrld xmm5, 2 ; xmm5 = W[I-2+1] >> 19 pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) pslld xmm6, 2 ; xmm6 = W[I-2+1] << 15 pxor xmm3, xmm1 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) pxor xmm3, xmm2 ; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15) paddd xmm0, xmm3 ; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7] pxor xmm7, xmm5 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) pxor xmm7, xmm6 ; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15) paddd xmm4, xmm7 ; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1] movdqa [r11+(%1*16)], xmm0 movdqa [r11+((%1+1)*16)], xmm4 %endmacro %assign i 0 %rep LAB_CALC_UNROLL lab_calc_blk i %assign i i+LAB_CALC_PARA %endrep add r11, LAB_CALC_UNROLL*LAB_CALC_PARA*16 cmp r11, temp jb LAB_CALC pop temp mov rax, 0 ; Load the init values of the message into the hash. movntdqa xmm7, [init] pshufd xmm5, xmm7, 0x55 ; xmm5 == b pshufd xmm4, xmm7, 0xAA ; xmm4 == c pshufd xmm3, xmm7, 0xFF ; xmm3 == d pshufd xmm7, xmm7, 0 ; xmm7 == a movntdqa xmm0, [init+4*4] pshufd xmm8, xmm0, 0x55 ; xmm8 == f pshufd xmm9, xmm0, 0xAA ; xmm9 == g pshufd xmm10, xmm0, 0xFF ; xmm10 == h pshufd xmm0, xmm0, 0 ; xmm0 == e LAB_LOOP: ;; T t1 = h + (Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25)) + ((e & f) ^ AndNot(e, g)) + Expand32(g_sha256_k[j]) + w[j] %macro lab_loop_blk 0 movntdqa xmm6, [data+rax*4] paddd xmm6, g_4sha256_k[rax*4] add rax, 4 paddd xmm6, xmm10 ; +h movdqa xmm1, xmm0 movdqa xmm2, xmm9 pandn xmm1, xmm2 ; ~e & g movdqa xmm10, xmm2 ; h = g movdqa xmm2, xmm8 ; f movdqa xmm9, xmm2 ; g = f pand xmm2, xmm0 ; e & f pxor xmm1, xmm2 ; (e & f) ^ (~e & g) movdqa xmm8, xmm0 ; f = e paddd xmm6, xmm1 ; Ch + h + w[i] + k[i] movdqa xmm1, xmm0 psrld xmm0, 6 movdqa xmm2, xmm0 pslld xmm1, 7 psrld xmm2, 5 pxor xmm0, xmm1 pxor xmm0, xmm2 pslld xmm1, 14 psrld xmm2, 14 pxor xmm0, xmm1 pxor xmm0, xmm2 pslld xmm1, 5 pxor xmm0, xmm1 ; Rotr32(e, 6) ^ Rotr32(e, 11) ^ Rotr32(e, 25) paddd xmm6, xmm0 ; xmm6 = t1 movdqa xmm0, xmm3 ; d paddd xmm0, xmm6 ; e = d+t1 movdqa xmm1, xmm5 ; =b movdqa xmm3, xmm4 ; d = c movdqa xmm2, xmm4 ; c pand xmm2, xmm5 ; b & c pand xmm4, xmm7 ; a & c pand xmm1, xmm7 ; a & b pxor xmm1, xmm4 movdqa xmm4, xmm5 ; c = b movdqa xmm5, xmm7 ; b = a pxor xmm1, xmm2 ; (a & c) ^ (a & d) ^ (c & d) paddd xmm6, xmm1 ; t1 + ((a & c) ^ (a & d) ^ (c & d)) movdqa xmm2, xmm7 psrld xmm7, 2 movdqa xmm1, xmm7 pslld xmm2, 10 psrld xmm1, 11 pxor xmm7, xmm2 pxor xmm7, xmm1 pslld xmm2, 9 psrld xmm1, 9 pxor xmm7, xmm2 pxor xmm7, xmm1 pslld xmm2, 11 pxor xmm7, xmm2 paddd xmm7, xmm6 ; a = t1 + (Rotr32(a, 2) ^ Rotr32(a, 13) ^ Rotr32(a, 22)) + ((a & c) ^ (a & d) ^ (c & d)); %endmacro %assign i 0 %rep LAB_LOOP_UNROLL lab_loop_blk %assign i i+1 %endrep cmp rax, temp jb LAB_LOOP ; Finished the 64 rounds, calculate hash and save movntdqa xmm1, [init] pshufd xmm2, xmm1, 0x55 paddd xmm5, xmm2 pshufd xmm6, xmm1, 0xAA paddd xmm4, xmm6 pshufd xmm11, xmm1, 0xFF paddd xmm3, xmm11 pshufd xmm1, xmm1, 0 paddd xmm7, xmm1 movntdqa xmm1, [init+4*4] pshufd xmm2, xmm1, 0x55 paddd xmm8, xmm2 pshufd xmm6, xmm1, 0xAA paddd xmm9, xmm6 pshufd xmm11, xmm1, 0xFF paddd xmm10, xmm11 pshufd xmm1, xmm1, 0 paddd xmm0, xmm1 movdqa [hash+0*16], xmm7 movdqa [hash+1*16], xmm5 movdqa [hash+2*16], xmm4 movdqa [hash+3*16], xmm3 movdqa [hash+4*16], xmm0 movdqa [hash+5*16], 
bfgminer-bfgminer-3.10.0/x86_64/sha256_xmm_amd64.asm000066400000000000000000000270031226556647300215210ustar00rootroot00000000000000;/*
; * Copyright (C) 2011 - Neil Kettle
; *
; * This file is part of cpuminer-ng.
; *
; * cpuminer-ng is free software: you can redistribute it and/or modify
; * it under the terms of the GNU General Public License as published by
; * the Free Software Foundation, either version 3 of the License, or
; * (at your option) any later version.
; *
; * cpuminer-ng is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
; * GNU General Public License for more details.
; *
; * You should have received a copy of the GNU General Public License
; * along with cpuminer-ng. If not, see .
; */

; %rbp, %rbx, and %r12-%r15 - callee save

ALIGN 32
BITS 64

%ifidn __OUTPUT_FORMAT__,win64
%define hash rcx
%define hash1 rdx
%define data r8
%define init r9
%else
%define hash rdi
%define hash1 rsi
%define data rdx
%define init rcx
%endif

; 0 = (1024 - 256) (mod (LAB_CALC_UNROLL*LAB_CALC_PARA*16))
%define SHA_CALC_W_PARA		2
%define SHA_CALC_W_UNROLL	8

%define SHA_ROUND_LOOP_UNROLL	16

extern sha256_consts_m128i
extern sha256_init_sse2

global sha256_sse2_64_new

%define sr1	xmm6
%define sr2	xmm1
%define sr3	xmm2
%define sr4	xmm13

%define rA	xmm7
%define rB	xmm5
%define rC	xmm4
%define rD	xmm3
%define rE	xmm0
%define rF	xmm8
%define rG	xmm9
%define rH	xmm10

%macro	sha_round_blk 0
	movdqa	sr1, [data+rax]				; T1 = w;
	;movdqa	sr1, xmm11
	movdqa	sr2, rE					; sr2 = rE
	pandn	sr2, rG					; sr2 = ~rE & rG
	movdqa	sr3, rF					; sr3 = rF
	paddd	sr1, rH					; T1 = h + sha256_consts_m128i[i] + w;
	movdqa	rH, rG					; rH = rG
	pand	sr3, rE					; sr3 = rE & rF
	movdqa	rG, rF					; rG = rF
%ifidn __YASM_OBJFMT__, macho64
	paddd	sr1, [rcx+rax]
%else
	paddd	sr1, sha256_consts_m128i[rax]		; T1 = sha256_consts_m128i[i] + w;
%endif
	pxor	sr2, sr3				; sr2 = (rE & rF) ^ (~rE & rG) = Ch (e, f, g)
	movdqa	rF, rE					; rF = rE
	paddd	sr1, sr2				; T1 = h + Ch (e, f, g) + sha256_consts_m128i[i] + w;
	movdqa	sr2, rE					; sr2 = rE
	psrld	rE, 6					; e >> 6
	movdqa	sr3, rE					; e >> 6
	pslld	sr2, 7					; e << 7
	psrld	sr3, 5					; e >> 11
	pxor	rE, sr2					; e >> 6 ^ e << 7
	pslld	sr2, 14					; e << 21
	pxor	rE, sr3					; e >> 6 ^ e << 7 ^ e >> 11
	psrld	sr3, 14					; e >> 25
	pxor	rE, sr2					; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21
	pslld	sr2, 5					; e << 26
	pxor	rE, sr3					; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21 ^ e >> 25
	pxor	rE, sr2					; e >> 6 ^ e << 7 ^ e >> 11 ^ e << 21 ^ e >> 25 ^ e << 26
	movdqa	sr2, rB					; sr2 = rB
	paddd	sr1, rE					; sr1 = h + BIGSIGMA1_256(e) + Ch (e, f, g) + sha256_consts_m128i[i] + w;
	movdqa	rE, rD					; rE = rD
	movdqa	rD, rC					; rD = rC
	paddd	rE, sr1					; rE = rD + T1
	movdqa	sr3, rC					; sr3 = rC
	pand	rC, rA					; rC = rC & rA
	pand	sr3, rB					; sr3 = rB & rC
	pand	sr2, rA					; sr2 = rB & rA
	pxor	sr2, rC					; sr2 = (rB & rA) ^ (rC & rA)
	movdqa	rC, rB					; rC = rB
	pxor	sr2, sr3				; sr2 = (rB & rA) ^ (rC & rA) ^ (rB & rC)
	movdqa	rB, rA					; rB = rA
	paddd	sr1, sr2				; sr1 = T1 + (rB & rA) ^ (rC & rA) ^ (rB & rC)
	lea	rax, [rax+16]
	movdqa	sr3, rA
	psrld	rA, 2					; a >> 2
	pslld	sr3, 10					; a << 10
	movdqa	sr2, rA					; a >> 2
	pxor	rA, sr3					; a >> 2 ^ a << 10
	psrld	sr2, 11					; a >> 13
	pxor	rA, sr2					; a >> 2 ^ a << 10 ^ a >> 13
	pslld	sr3, 9					; a << 19
	pxor	rA, sr3					; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19
	psrld	sr2, 9					; a >> 21
	pxor	rA, sr2					; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19 ^ a >> 21
	pslld	sr3, 11					; a << 30
	pxor	rA, sr3					; a >> 2 ^ a << 10 ^ a >> 13 ^ a << 19 ^ a >> 21 ^ a << 30
	paddd	rA, sr1					; T1 + BIGSIGMA0_256(a) + Maj(a, b, c);
%endmacro

%macro	sha_calc_w_blk 1
	movdqa	xmm0, [r11-(15-%1)*16]			; xmm0 = W[I-15]
	movdqa	xmm4, [r11-(15-(%1+1))*16]		; xmm4 = W[I-15+1]
	movdqa	xmm2, xmm0				; xmm2 = W[I-15]
	movdqa	xmm6, xmm4				; xmm6 = W[I-15+1]
	psrld	xmm0, 3					; xmm0 = W[I-15] >> 3
	psrld	xmm4, 3					; xmm4 = W[I-15+1] >> 3
	movdqa	xmm1, xmm0				; xmm1 = W[I-15] >> 3
	movdqa	xmm5, xmm4				; xmm5 = W[I-15+1] >> 3
	pslld	xmm2, 14				; xmm2 = W[I-15] << 14
	pslld	xmm6, 14				; xmm6 = W[I-15+1] << 14
	psrld	xmm1, 4					; xmm1 = W[I-15] >> 7
	psrld	xmm5, 4					; xmm5 = W[I-15+1] >> 7
	pxor	xmm0, xmm1				; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7)
	pxor	xmm4, xmm5				; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7)
	psrld	xmm1, 11				; xmm1 = W[I-15] >> 18
	psrld	xmm5, 11				; xmm5 = W[I-15+1] >> 18
	pxor	xmm0, xmm2				; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14)
	pxor	xmm4, xmm6				; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14)
	pslld	xmm2, 11				; xmm2 = W[I-15] << 25
	pslld	xmm6, 11				; xmm6 = W[I-15+1] << 25
	pxor	xmm0, xmm1				; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18)
	pxor	xmm4, xmm5				; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18)
	pxor	xmm0, xmm2				; xmm0 = (W[I-15] >> 3) ^ (W[I-15] >> 7) ^ (W[I-15] << 14) ^ (W[I-15] >> 18) ^ (W[I-15] << 25)
	pxor	xmm4, xmm6				; xmm4 = (W[I-15+1] >> 3) ^ (W[I-15+1] >> 7) ^ (W[I-15+1] << 14) ^ (W[I-15+1] >> 18) ^ (W[I-15+1] << 25)
	movdqa	xmm3, [r11-(2-%1)*16]			; xmm3 = W[I-2]
	movdqa	xmm7, [r11-(2-(%1+1))*16]		; xmm7 = W[I-2+1]
	paddd	xmm0, [r11-(16-%1)*16]			; xmm0 = s0(W[I-15]) + W[I-16]
	paddd	xmm4, [r11-(16-(%1+1))*16]		; xmm4 = s0(W[I-15+1]) + W[I-16+1]

;;;;;;;;;;;;;;;;;;

	movdqa	xmm2, xmm3				; xmm2 = W[I-2]
	movdqa	xmm6, xmm7				; xmm6 = W[I-2+1]
	psrld	xmm3, 10				; xmm3 = W[I-2] >> 10
	psrld	xmm7, 10				; xmm7 = W[I-2+1] >> 10
	movdqa	xmm1, xmm3				; xmm1 = W[I-2] >> 10
	movdqa	xmm5, xmm7				; xmm5 = W[I-2+1] >> 10
	paddd	xmm0, [r11-(7-%1)*16]			; xmm0 = s0(W[I-15]) + W[I-16] + W[I-7]
	pslld	xmm2, 13				; xmm2 = W[I-2] << 13
	pslld	xmm6, 13				; xmm6 = W[I-2+1] << 13
	psrld	xmm1, 7					; xmm1 = W[I-2] >> 17
	psrld	xmm5, 7					; xmm5 = W[I-2+1] >> 17
	paddd	xmm4, [r11-(7-(%1+1))*16]		; xmm4 = s0(W[I-15+1]) + W[I-16+1] + W[I-7+1]
	pxor	xmm3, xmm1				; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17)
	pxor	xmm7, xmm5				; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17)
	psrld	xmm1, 2					; xmm1 = W[I-2] >> 19
	psrld	xmm5, 2					; xmm5 = W[I-2+1] >> 19
	pxor	xmm3, xmm2				; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13)
	pxor	xmm7, xmm6				; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13)
	pslld	xmm2, 2					; xmm2 = W[I-2] << 15
	pslld	xmm6, 2					; xmm6 = W[I-2+1] << 15
	pxor	xmm3, xmm1				; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19)
	pxor	xmm7, xmm5				; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19)
	pxor	xmm3, xmm2				; xmm3 = (W[I-2] >> 10) ^ (W[I-2] >> 17) ^ (W[I-2] << 13) ^ (W[I-2] >> 19) ^ (W[I-2] << 15)
	pxor	xmm7, xmm6				; xmm7 = (W[I-2+1] >> 10) ^ (W[I-2+1] >> 17) ^ (W[I-2+1] << 13) ^ (W[I-2+1] >> 19) ^ (W[I-2+1] << 15)
	paddd	xmm0, xmm3				; xmm0 = s0(W[I-15]) + W[I-16] + s1(W[I-2]) + W[I-7]
	paddd	xmm4, xmm7				; xmm4 = s0(W[I-15+1]) + W[I-16+1] + s1(W[I-2+1]) + W[I-7+1]
	movdqa	[r11+(%1*16)], xmm0
	movdqa	[r11+((%1+1)*16)], xmm4
%endmacro

; _sha256_sse2_64_new hash(rdi), hash1(rsi), data(rdx), init(rcx),
sha256_sse2_64_new:

	push	rbx

%ifidn __OUTPUT_FORMAT__,win64
	sub	rsp, 16 * 6
	movdqa	[rsp + 16*0], xmm6
	movdqa	[rsp + 16*1], xmm7
	movdqa	[rsp + 16*2], xmm8
	movdqa	[rsp + 16*3], xmm9
	movdqa	[rsp + 16*4], xmm10
	movdqa	[rsp + 16*5], xmm13
%endif

%macro	SHA_256 0
	mov	rbx, 64*4				; rbx is # of SHA-2 rounds
	mov	rax, 16*4				; rax is where we expand to

	push	rbx
	lea	rbx, qword [data+rbx*4]
	lea	r11, qword [data+rax*4]

%%SHA_CALC_W:
%assign i 0
%rep	SHA_CALC_W_UNROLL
	sha_calc_w_blk i
%assign i i+SHA_CALC_W_PARA
%endrep
	add	r11, SHA_CALC_W_UNROLL*SHA_CALC_W_PARA*16
	cmp	r11, rbx
	jb	%%SHA_CALC_W

	pop	rbx
	mov	rax, 0
	lea	rbx, [rbx*4]

	movdqa	rA, [init]
	pshufd	rB, rA, 0x55				; rB == B
	pshufd	rC, rA, 0xAA				; rC == C
	pshufd	rD, rA, 0xFF				; rD == D
	pshufd	rA, rA, 0				; rA == A

	movdqa	rE, [init+4*4]
	pshufd	rF, rE, 0x55				; rF == F
	pshufd	rG, rE, 0xAA				; rG == G
	pshufd	rH, rE, 0xFF				; rH == H
	pshufd	rE, rE, 0				; rE == E

%ifidn __YASM_OBJFMT__, macho64
	lea	rcx, [sha256_consts_m128i wrt rip]
%endif

%%SHAROUND_LOOP:
%assign i 0
%rep	SHA_ROUND_LOOP_UNROLL
	sha_round_blk
%assign i i+1
%endrep
	cmp	rax, rbx
	jb	%%SHAROUND_LOOP

; Finished the 64 rounds, calculate hash and save

	movdqa	sr1, [init]
	pshufd	sr2, sr1, 0x55
	pshufd	sr3, sr1, 0xAA
	pshufd	sr4, sr1, 0xFF
	pshufd	sr1, sr1, 0
	paddd	rB, sr2
	paddd	rC, sr3
	paddd	rD, sr4
	paddd	rA, sr1

	movdqa	sr1, [init+4*4]
	pshufd	sr2, sr1, 0x55
	pshufd	sr3, sr1, 0xAA
	pshufd	sr4, sr1, 0xFF
	pshufd	sr1, sr1, 0
	paddd	rF, sr2
	paddd	rG, sr3
	paddd	rH, sr4
	paddd	rE, sr1
%endmacro

	SHA_256

	movdqa	[hash1+0*16], rA
	movdqa	[hash1+1*16], rB
	movdqa	[hash1+2*16], rC
	movdqa	[hash1+3*16], rD
	movdqa	[hash1+4*16], rE
	movdqa	[hash1+5*16], rF
	movdqa	[hash1+6*16], rG
	movdqa	[hash1+7*16], rH

	mov	data, hash1
	mov	init, qword sha256_init_sse2

	SHA_256

	movdqa	[hash+7*16], rH

LAB_RET:
%ifidn __OUTPUT_FORMAT__,win64
	movdqa	xmm6, [rsp + 16*0]
	movdqa	xmm7, [rsp + 16*1]
	movdqa	xmm8, [rsp + 16*2]
	movdqa	xmm9, [rsp + 16*3]
	movdqa	xmm10, [rsp + 16*4]
	movdqa	xmm13, [rsp + 16*5]
	add	rsp, 16 * 6
%endif
	pop	rbx
	ret

%ifidn __OUTPUT_FORMAT__,elf
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
%ifidn __OUTPUT_FORMAT__,elf64
section .note.GNU-stack noalloc noexec nowrite progbits
%endif
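The register shuffle in sha_round_blk corresponds, lane for lane, to one standard SHA-256 round. Below is a minimal SSE2-intrinsics sketch of the same 4-way round, assuming the interleaved layout the assembly appears to use (each __m128i carries one state word for four independent hashes); the type and function names are illustrative and are not part of bfgminer.

/* Minimal SSE2-intrinsics sketch of one 4-way SHA-256 round, mirroring
 * sha_round_blk.  Each __m128i holds one state word for four hashes; k and w
 * are the round constant and schedule word, already replicated per lane.
 * Names are hypothetical, for illustration only. */
#include <emmintrin.h>

typedef struct {
	__m128i a, b, c, d, e, f, g, h;
} sha256_4way_state;			/* illustrative helper type */

static inline __m128i rotr_4way(__m128i x, int n)
{
	/* rotate right by n, built from a shift pair as the assembly does */
	return _mm_or_si128(_mm_srli_epi32(x, n), _mm_slli_epi32(x, 32 - n));
}

static void sha256_4way_round(sha256_4way_state *s, __m128i k, __m128i w)
{
	__m128i S1 = _mm_xor_si128(_mm_xor_si128(rotr_4way(s->e, 6),
	                                         rotr_4way(s->e, 11)),
	                           rotr_4way(s->e, 25));
	__m128i ch = _mm_xor_si128(_mm_and_si128(s->e, s->f),
	                           _mm_andnot_si128(s->e, s->g));	/* pandn */
	__m128i t1 = _mm_add_epi32(_mm_add_epi32(_mm_add_epi32(s->h, S1),
	                                         _mm_add_epi32(ch, k)), w);
	__m128i S0 = _mm_xor_si128(_mm_xor_si128(rotr_4way(s->a, 2),
	                                         rotr_4way(s->a, 13)),
	                           rotr_4way(s->a, 22));
	__m128i maj = _mm_xor_si128(_mm_xor_si128(_mm_and_si128(s->a, s->b),
	                                          _mm_and_si128(s->a, s->c)),
	                            _mm_and_si128(s->b, s->c));
	__m128i t2 = _mm_add_epi32(S0, maj);

	s->h = s->g;  s->g = s->f;  s->f = s->e;
	s->e = _mm_add_epi32(s->d, t1);
	s->d = s->c;  s->c = s->b;  s->b = s->a;
	s->a = _mm_add_epi32(t1, t2);
}

Note that the second SHA_256 invocation stores only rH into [hash+7*16]; the caller evidently needs just the final state word of the doubled hash when screening candidate nonces.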