onedriver-0.14.1/.bumpversion.cfg
[bumpversion]
current_version = 0.14.1
commit = False
tag = False
[bumpversion:file:cmd/common/common.go]
[bumpversion:file:pkg/resources/onedriver.1]
[bumpversion:file:onedriver.spec]
search = Version: {current_version}
replace = Version: {new_version}
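With the files and search/replace patterns above, cutting a release is typically a single command (a minimal sketch, assuming the `bumpversion` tool is installed; `commit` and `tag` are disabled in this config, so it only rewrites version strings):

```bash
# e.g. 0.14.1 -> 0.15.0 in common.go, onedriver.1, and onedriver.spec
bumpversion minor
```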
onedriver-0.14.1/.copr/Makefile
srpm:
	dnf -y install golang git rsync
	$(MAKE) -f Makefile srpm
	cp *.src.rpm $(outdir)
onedriver-0.14.1/.github/FUNDING.yml
---
github: jstaf
onedriver-0.14.1/.github/workflows/ci.yml
name: Run tests
on: push
jobs:
  test:
    name: Run tests
    runs-on: ubuntu-20.04
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        account_type:
          - personal
          - business
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v2
        with:
          go-version: "1.17"
      - name: Install apt dependencies
        run: |
          sudo apt update
          sudo apt install \
            gcc \
            pkg-config \
            libwebkit2gtk-4.0-dev \
            libjson-glib-dev \
            make \
            wget \
            rpm \
            awscli \
            libreoffice
          # remove the preinstalled AWS CLI so the apt-installed one is used
          sudo rm /usr/local/bin/aws
      - uses: actions/cache@v2
        with:
          path: |
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-go-
      - name: Check go formatting
        run: |
          go install golang.org/x/tools/cmd/goimports@latest
          goimports -l .
      - name: Copy auth tokens from S3
        run: |
          which aws
          aws --version
          aws s3 cp s3://fusefs-travis/$ACCOUNT_TYPE/.auth_tokens.json .
          aws s3 cp s3://fusefs-travis/dmel.fa.gz .
          gunzip dmel.fa.gz
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          ACCOUNT_TYPE: ${{ matrix.account_type }}
      - name: Run tests
        run: |
          go install github.com/rakyll/gotest@latest
          # systemd tests cannot run here because the GitHub Actions runners
          # don't have dbus set up, and the UI tests are very slow with CGO on
          bash cgo-helper.sh
          CGO_ENABLED=0 gotest -v -covermode=count -coverpkg=./ui/... -coverprofile=ui.coverage ./ui
          gotest -v -covermode=count -coverpkg=./cmd/common -coverprofile=common.coverage ./cmd/common
          gotest -v -covermode=count -coverpkg=./fs/... -coverprofile=quickxorhash.coverage ./fs/graph/quickxorhash
          gotest -v -covermode=count -coverpkg=./fs/... -coverprofile=graph.coverage ./fs/graph
          gotest -v -covermode=count -coverpkg=./fs/... -coverprofile=fs.coverage ./fs
          go test -c -covermode=count -coverpkg=./fs/... ./fs/offline
          sudo unshare -n -S $(id -u) -G $(id -g) ./offline.test -test.v -test.coverprofile=offline.coverage
      - name: Copy new auth tokens to S3
        run: |
          /usr/bin/aws s3 cp .auth_tokens.json s3://fusefs-travis/$ACCOUNT_TYPE/
          /usr/bin/aws s3 cp fusefs_tests.log s3://fusefs-travis/$ACCOUNT_TYPE/
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          ACCOUNT_TYPE: ${{ matrix.account_type }}
        if: always()
      - name: Combine test coverage into single file
        run: |
          go install github.com/wadey/gocovmerge@latest
          gocovmerge *.coverage > coverage.out
        if: always()
      - name: Convert coverage to lcov
        uses: jandelgado/gcov2lcov-action@v1.0.5
        if: always()
      - name: Send test coverage to Coveralls
        uses: coverallsapp/github-action@v1.1.2
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          path-to-lcov: coverage.lcov
          flag-name: ${{ matrix.account_type }}
          parallel: true
        # decreased coverage isn't a failure
        continue-on-error: true
        if: always()
  finish:
    name: Complete Coveralls run
    needs: test
    if: always()
    runs-on: ubuntu-20.04
    steps:
      - name: Coveralls finished
        uses: coverallsapp/github-action@v1.1.2
        with:
          github-token: ${{ secrets.github_token }}
          parallel-finished: true
        # decreased coverage isn't a failure
        continue-on-error: true
onedriver-0.14.1/.github/workflows/codeql.yml
name: "CodeQL"
on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '26 17 * * 5'
jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write
    strategy:
      fail-fast: false
      matrix:
        language: [ 'go' ]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3
      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v2
        with:
          languages: ${{ matrix.language }}
      # CodeQL's autobuild can build compiled languages (C/C++, C#, Go, or Java),
      # but onedriver is built manually here with CGO disabled instead.
      - name: Build onedriver
        run: make onedriver
        env:
          CGO_ENABLED: "0"
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v2
        with:
          category: "/language:${{matrix.language}}"
onedriver-0.14.1/.gitignore
.vscode/
.osc/
compile_flags.txt
.commit
.date
mount/
build/
tmp/
*.json
*.bak
*.log*
*.race.*
*.pdf
*.fa
*.docx
*.gz
*.db
*.test
*.out
*.txt
*.coverage
*.core
*.gdb
vgcore.*
__debug_bin*
# do not include binaries, but do include sources
onedriver
onedriver-headless
onedriver-launcher
onedriver-*/
!cmd/onedriver-launcher
!cmd/onedriver
vendor/
# rpm build stuff
*.rpm
# for tests on older linuxes
util-linux*
unshare
# debian build stuff
debian/debhelper-build-stamp
debian/*.substvars
debian/*.debhelper
debian/files
*.dsc
*.deb
*.changes
*.build*
*.upload
*.tar.xz
onedriver-0.14.1/.prettierignore
pkg/debian/changelog
onedriver-0.14.1/LICENSE
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year>  <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program>  Copyright (C) <year>  <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.
onedriver-0.14.1/Makefile
.PHONY: all test test-init srpm rpm dsc changes deb clean install uninstall
# autocalculate software/package versions
VERSION := $(shell grep Version onedriver.spec | sed 's/Version: *//g')
RELEASE := $(shell grep -oP "Release: *[0-9]+" onedriver.spec | sed 's/Release: *//g')
DIST := $(shell rpm --eval "%{?dist}" 2> /dev/null || echo 1)
RPM_FULL_VERSION = $(VERSION)-$(RELEASE)$(DIST)

# -Wno-deprecated-declarations is for gotk3, which uses deprecated methods for older
# glib compatibility: https://github.com/gotk3/gotk3/issues/762#issuecomment-919035313
CGO_CFLAGS := CGO_CFLAGS=-Wno-deprecated-declarations

# test-specific variables
TEST_UID := $(shell whoami)
GORACE := GORACE="log_path=fusefs_tests.race strip_path_prefix=1"

all: onedriver onedriver-launcher

onedriver: $(shell find fs/ -type f) cmd/onedriver/main.go
	bash cgo-helper.sh
	$(CGO_CFLAGS) go build -v \
		-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(shell git rev-parse HEAD)" \
		./cmd/onedriver

onedriver-headless: $(shell find fs/ cmd/common/ -type f) cmd/onedriver/main.go
	CGO_ENABLED=0 go build -v -o onedriver-headless \
		-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(shell git rev-parse HEAD)" \
		./cmd/onedriver

onedriver-launcher: $(shell find ui/ cmd/common/ -type f) cmd/onedriver-launcher/main.go
	$(CGO_CFLAGS) go build -v \
		-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(shell git rev-parse HEAD)" \
		./cmd/onedriver-launcher

install: onedriver onedriver-launcher
	cp onedriver /usr/bin/
	cp onedriver-launcher /usr/bin/
	mkdir -p /usr/share/icons/onedriver/
	cp pkg/resources/onedriver.svg /usr/share/icons/onedriver/
	cp pkg/resources/onedriver.png /usr/share/icons/onedriver/
	cp pkg/resources/onedriver-128.png /usr/share/icons/onedriver/
	cp pkg/resources/onedriver.desktop /usr/share/applications/
	cp pkg/resources/onedriver@.service /etc/systemd/user/
	gzip -c pkg/resources/onedriver.1 > /usr/share/man/man1/onedriver.1.gz
	mandb

uninstall:
	rm -f \
		/usr/bin/onedriver \
		/usr/bin/onedriver-launcher \
		/etc/systemd/user/onedriver@.service \
		/usr/share/applications/onedriver.desktop \
		/usr/share/man/man1/onedriver.1.gz
	rm -rf /usr/share/icons/onedriver
	mandb

# used to create release tarball for rpmbuild
v$(VERSION).tar.gz: $(shell git ls-files)
	rm -rf onedriver-$(VERSION)
	mkdir -p onedriver-$(VERSION)
	git ls-files > filelist.txt
	git rev-parse HEAD > .commit
	echo .commit >> filelist.txt
	rsync -a --files-from=filelist.txt . onedriver-$(VERSION)
	mv onedriver-$(VERSION)/pkg/debian onedriver-$(VERSION)
	go mod vendor
	cp -R vendor/ onedriver-$(VERSION)
	tar -czf $@ onedriver-$(VERSION)

# build srpm package used for rpm build with mock
srpm: onedriver-$(RPM_FULL_VERSION).src.rpm
onedriver-$(RPM_FULL_VERSION).src.rpm: v$(VERSION).tar.gz
	rpmbuild -ts $<
	cp $$(rpm --eval '%{_topdir}')/SRPMS/$@ .

# build the rpm for the default mock target
MOCK_CONFIG=$(shell readlink -f /etc/mock/default.cfg | grep -oP '[a-z0-9-]+x86_64')
rpm: onedriver-$(RPM_FULL_VERSION).x86_64.rpm
onedriver-$(RPM_FULL_VERSION).x86_64.rpm: onedriver-$(RPM_FULL_VERSION).src.rpm
	mock -r /etc/mock/$(MOCK_CONFIG).cfg $<
	cp /var/lib/mock/$(MOCK_CONFIG)/result/$@ .

# create a release tarball for debian builds
onedriver_$(VERSION).orig.tar.gz: v$(VERSION).tar.gz
	cp $< $@

# create the debian source package for the current version
changes: onedriver_$(VERSION)-$(RELEASE)_source.changes
onedriver_$(VERSION)-$(RELEASE)_source.changes: onedriver_$(VERSION).orig.tar.gz
	cd onedriver-$(VERSION) && debuild -S -sa -d

# just a helper target to use while building debs
dsc: onedriver_$(VERSION)-$(RELEASE).dsc
onedriver_$(VERSION)-$(RELEASE).dsc: onedriver_$(VERSION).orig.tar.gz
	dpkg-source --build onedriver-$(VERSION)

# create the debian package in a chroot via pbuilder
deb: onedriver_$(VERSION)-$(RELEASE)_amd64.deb
onedriver_$(VERSION)-$(RELEASE)_amd64.deb: onedriver_$(VERSION)-$(RELEASE).dsc
	sudo mkdir -p /var/cache/pbuilder/aptcache
	sudo pbuilder --build $<
	cp /var/cache/pbuilder/result/$@ .

# a large text file for us to test upload sessions with. #science
dmel.fa:
	curl ftp://ftp.ensemblgenomes.org/pub/metazoa/release-42/fasta/drosophila_melanogaster/dna/Drosophila_melanogaster.BDGP6.22.dna.chromosome.X.fa.gz | zcat > $@

# setup tests for the first time on a new computer
test-init: onedriver
	go install github.com/rakyll/gotest@latest
	mkdir -p mount/
	$< -a mount/

# For offline tests, the test binary is built online, then network access is
# disabled and tests are run. sudo is required - otherwise we don't have
# permission to deny network access to onedriver during the test.
test: onedriver onedriver-launcher dmel.fa
	rm -f *.race* fusefs_tests.log
	CGO_ENABLED=0 gotest -v -parallel=8 -count=1 $(shell go list ./ui/... | grep -v offline)
	$(CGO_CFLAGS) gotest -v -parallel=8 -count=1 ./cmd/...
	$(CGO_CFLAGS) $(GORACE) gotest -race -v -parallel=8 -count=1 ./fs/graph/...
	$(CGO_CFLAGS) $(GORACE) gotest -race -v -parallel=8 -count=1 ./fs
	$(CGO_CFLAGS) go test -c ./fs/offline
	@echo "sudo is required to run tests of offline functionality:"
	sudo unshare -n sudo -u $(TEST_UID) ./offline.test -test.v -test.parallel=8 -test.count=1

# will literally purge everything: all built artifacts, all logs, all tests,
# all files tests depend on, all auth tokens... EVERYTHING
clean:
	fusermount -uz mount/ || true
	rm -f *.db *.rpm *.deb *.dsc *.changes *.build* *.upload *.xz filelist.txt .commit
	rm -f *.log *.fa *.gz *.test vgcore.* onedriver onedriver-headless onedriver-launcher .auth_tokens.json
	rm -rf util-linux-*/ onedriver-*/ vendor/ build/
onedriver-0.14.1/README.md
[![Run tests](https://github.com/jstaf/onedriver/workflows/Run%20tests/badge.svg)](https://github.com/jstaf/onedriver/actions?query=workflow%3A%22Run+tests%22)
[![Coverage Status](https://coveralls.io/repos/github/jstaf/onedriver/badge.svg?branch=master)](https://coveralls.io/github/jstaf/onedriver?branch=master)
[![Copr build status](https://copr.fedorainfracloud.org/coprs/jstaf/onedriver/package/onedriver/status_image/last_build.png)](https://copr.fedorainfracloud.org/coprs/jstaf/onedriver/package/onedriver/)
# onedriver
**onedriver is a native Linux filesystem for Microsoft OneDrive.**
onedriver is a network filesystem that gives your computer direct access to your
files on Microsoft OneDrive. This is not a sync client. Instead of syncing
files, onedriver performs an on-demand download of files when your computer
attempts to use them. onedriver allows you to use files on OneDrive as if they
were files on your local computer.
onedriver is extremely straightforward to use:
- Install onedriver using your favorite installation method.
- Click the "+" button in the app to set up one or more OneDrive accounts.
(There's a command-line workflow for those who prefer doing things that way
too!)
- Just start using your files on OneDrive as if they were normal files.
I've spent a lot of time trying to make onedriver fast, convenient, and easy to
use. Though you can use it on servers, the goal here is to make it easy to work
with OneDrive files on your Linux desktop. This allows you to easily sync files
between any number of Windows, Mac, and Linux computers. You can setup your
phone to auto-upload photos to OneDrive and edit and view them on your Linux
computer. You can switch between LibreOffice on your local computer and the
Microsoft 365 online apps as needed when working. Want to migrate from Windows
to Linux? Just throw all your Windows files into OneDrive, add your OneDrive
account to Linux with onedriver, and call it a day.
**Microsoft OneDrive works on Linux.**
Getting started with your files on OneDrive is as easy as running:
`onedriver /path/to/mount/onedrive/at` (there's also a helpful GUI!).
## Key features
onedriver has several nice features that make it significantly more useful than
other OneDrive clients:
- **Files are only downloaded when you use them.** onedriver will only download
a file if you (or a program on your computer) uses that file. You don't need
to wait hours for a sync client to sync your entire OneDrive account to your
local computer or try to guess which files and folders you might need later
while setting up a "selective sync". onedriver gives you instant access to
_all_ of your files and only downloads the ones you use.
- **Bidirectional sync.** Although onedriver doesn't actually "sync" any files,
any changes that occur on OneDrive will be automatically reflected on your
local machine. onedriver only redownloads a file when you access one that has
changed remotely on OneDrive. If you somehow modify a file both locally and
remotely at the same time, your local copy always takes priority (so you never
lose any local work).
- **Can be used offline.** Files you've opened previously will be available even
if your computer has no access to the internet. The filesystem becomes
read-only if you lose internet access, and automatically enables write access
again when you reconnect to the internet.
- **Fast.** Great care has been taken to ensure that onedriver never makes a
network request unless it actually needs to. onedriver caches both filesystem
metadata and file contents both in memory and on-disk. Accessing your OneDrive
files will be fast and snappy even if you're engaged in a fight to the death
for the last power outlet at a coffeeshop with bad wifi. (This has definitely
never happened to me before, why do you ask?)
- **Has a user interface.** You can add and remove your OneDrive accounts
without ever using the command-line. Once you've added your OneDrive accounts,
there's no special interface beyond your normal file browser.
- **Free and open-source.** They're your files. Why should you have to pay to
access them? onedriver is licensed under the GPLv3, which means you will
_always_ have access to use onedriver to access your files on OneDrive.
## Quick start
### Fedora/CentOS/RHEL
Users on Fedora/CentOS/RHEL systems are recommended to install onedriver from
[COPR](https://copr.fedorainfracloud.org/coprs/jstaf/onedriver/). This will
install the latest version of onedriver through your package manager and ensure
it stays up-to-date with bugfixes and new features.
```bash
sudo dnf copr enable jstaf/onedriver
sudo dnf install onedriver
```
### OpenSUSE
OpenSUSE users need to add the COPR repo for either Leap or Tumbleweed:
```bash
# Leap 15.3
sudo zypper addrepo -g -r https://copr.fedorainfracloud.org/coprs/jstaf/onedriver/repo/opensuse-leap-15.3/jstaf-onedriver-opensuse-leap-15.3.repo onedriver
sudo zypper --gpg-auto-import-keys refresh
sudo zypper install onedriver
# Tumbleweed
sudo zypper addrepo -g -r https://copr.fedorainfracloud.org/coprs/jstaf/onedriver/repo/opensuse-tumbleweed/jstaf-onedriver-opensuse-tumbleweed.repo onedriver
sudo zypper --gpg-auto-import-keys refresh
sudo zypper install onedriver
```
### Ubuntu/Pop!\_OS/Debian
Ubuntu/Pop!\_OS/Debian users can install onedriver from the
[OpenSUSE Build Service](https://software.opensuse.org/download.html?project=home%3Ajstaf&package=onedriver)
(despite the name, OBS also does a nice job of building packages for Debian).
Like the COPR install, this will enable you to install onedriver through your
package manager and install updates as they become available. If you previously
installed onedriver via PPA, you can purge the old PPA from your system via:
`sudo add-apt-repository --remove ppa:jstaf/onedriver`
### Arch/Manjaro/EndeavourOS
Arch/Manjaro/EndeavourOS users can install onedriver from the
[AUR](https://aur.archlinux.org/packages/onedriver/).
Post-installation, you can start onedriver either via the `onedriver-launcher`
desktop app, or via the command line: `onedriver /path/to/mount/onedrive/at/`.
### Gentoo
Gentoo users can install onedriver from
[this ebuild overlay](https://github.com/foopsss/Ebuilds) provided by a user. If
you don't want to add user-hosted overlays to your system, you may copy the
ebuild for the latest version to a local overlay, which can be created by
following the instructions available in the
[Gentoo Wiki](https://wiki.gentoo.org/wiki/Creating_an_ebuild_repository).
Make sure to carefully review the ebuild for the package before installing it.
## Multiple drives and starting OneDrive on login via systemd
**Note:** You can also set this up through the GUI via the `onedriver-launcher`
desktop app installed via rpm/deb/`make install`. You can skip this section if
you're using the GUI. It's honestly easier.
To start onedriver automatically and ensure you always have access to your
files, you can start onedriver as a systemd user service. In this example,
`$MOUNTPOINT` refers to where we want OneDrive to be mounted at (for instance,
`~/OneDrive`).
```bash
# create the mountpoint and determine the service name
mkdir -p $MOUNTPOINT
export SERVICE_NAME=$(systemd-escape --template onedriver@.service --path $MOUNTPOINT)
# mount onedrive
systemctl --user daemon-reload
systemctl --user start $SERVICE_NAME
# automatically mount onedrive when you login
systemctl --user enable $SERVICE_NAME
# check onedriver's logs for the current day
journalctl --user -u $SERVICE_NAME --since today
```
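The same service can later be stopped or removed from login startup with the
standard systemctl verbs (a minimal sketch; `$SERVICE_NAME` is the value
computed above):

```bash
# unmount onedrive and stop mounting it automatically at login
systemctl --user stop $SERVICE_NAME
systemctl --user disable $SERVICE_NAME
```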
## Building onedriver yourself
In addition to the traditional [Go tooling](https://golang.org/dl/), you will
need a C compiler and development headers for `webkit2gtk-4.0` and `json-glib`.
On Fedora, these can be obtained with
`dnf install golang gcc pkg-config webkit2gtk3-devel json-glib-devel`. On
Ubuntu, these dependencies can be installed with
`apt install golang gcc pkg-config libwebkit2gtk-4.0-dev libjson-glib-dev`.
```bash
# to build and run the binary
make
mkdir mount
./onedriver mount/
# in new window, check out the mounted filesystem
ls -l mount
# unmount the filesystem
fusermount -uz mount
# you can also just "ctrl-c" onedriver to unmount it
```
### Running the tests
The tests will write and delete files/folders on your onedrive account at the
path `/onedriver_tests`. Note that the offline test suite requires `sudo` to
remove network access to simulate being offline.
```bash
# setup test tooling for first time run
make test-init
# actually run tests
make test
```
### Installation from source
onedriver has multiple installation methods depending on your needs.
```bash
# install directly from source
make
sudo make install
# create an RPM for system-wide installation on RHEL/CentOS/Fedora using mock
sudo dnf install golang gcc webkit2gtk3-devel json-glib-devel pkg-config git \
rsync rpmdevtools rpm-build mock
sudo usermod -aG mock $USER
newgrp mock
make rpm
# create a .deb for system-wide installation on Ubuntu/Debian using pbuilder
sudo apt update
sudo apt install golang gcc libwebkit2gtk-4.0-dev libjson-glib-dev pkg-config git \
rsync devscripts debhelper build-essential pbuilder
sudo pbuilder create # may need to add "--distribution focal" on ubuntu
make deb
```
## Troubleshooting
During your OneDrive travels, you might hit a bug that I haven't squashed yet.
Don't panic! In most cases, the filesystem will report what happened to whatever
program you're using. (As an example, an error mentioning a "read-only
filesystem" indicates that your computer is currently offline.)
If the filesystem appears to hang or "freeze" indefinitely, it's possible the
filesystem has crashed. To resolve this, just restart the program by unmounting
and remounting things via the GUI or by running `fusermount -uz $MOUNTPOINT` on
the command-line.
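As a concrete sketch (assuming a systemd-managed mount at `~/OneDrive`),
recovery might look like:

```bash
fusermount -uz ~/OneDrive
systemctl --user restart $(systemd-escape --template onedriver@.service --path ~/OneDrive)
```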
If you really want to go back to a clean slate, onedriver can be completely
reset (delete all cached local data) by deleting mounts in the GUI or running
`onedriver -w`.
If you encounter a bug or have a feature request, open an issue in the "Issues"
tab here on GitHub. The two most informative things you can put in a bug report
are the logs from the bug/just before encountering the bug (get logs via
`journalctl --user -u $SERVICE_NAME --since today` ... see docs for correct
value of `$SERVICE_NAME`) and/or instructions on how to reproduce the issue.
Otherwise I have to guess what the problem is :disappointed:
## Known issues & disclaimer
Many file browsers (like
[GNOME's Nautilus](https://gitlab.gnome.org/GNOME/nautilus/-/issues/1209)) will
attempt to automatically download all files within a directory in order to
create thumbnail images. This is somewhat annoying, but only needs to happen
once - after the initial thumbnail images have been created, thumbnails will
persist between filesystem restarts.
Microsoft does not support symbolic links (or anything remotely like them) on
OneDrive. Attempting to create symbolic links within the filesystem returns
ENOSYS (function not implemented) because the functionality hasn't been
implemented... by Microsoft. Similarly, Microsoft does not expose the OneDrive
Recycle Bin APIs - if you want to empty or restore the OneDrive Recycle Bin, you
must do so through the OneDrive web UI (onedriver uses the native system
trash/restore functionality independently of the OneDrive Recycle Bin).
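As a concrete example, here is what the symlink limitation looks like in
practice (the mountpoint path is hypothetical):
```bash
ln -s some-target ~/OneDrive/my-link
# ln: failed to create symbolic link '/home/user/OneDrive/my-link': Function not implemented
```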
onedriver loads files into memory when you access them. This makes things very
fast, but obviously doesn't work very well if you have very large files. Use a
sync client like [rclone](https://rclone.org/) if you need the ability to copy
multi-gigabyte files to OneDrive.
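For instance, copying a single large file with rclone might look like this
(assuming you have already configured an rclone remote named "onedrive"):
```bash
rclone copy ./some-huge-file.iso onedrive:
```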
OneDrive is not a good place to backup files to. Use a tool like
[restic](https://restic.net/) or [borg](https://www.borgbackup.org/) if you're
looking for a reliable encrypted backup tool. I know some of you want to "back
up your files to OneDrive". Don't do it. Restic and Borg are better in every
possible way than any OneDrive client ever will be when it comes to creating
backups you can count on.
Finally, this project is still in active development and is provided AS IS.
There are no guarantees. It might kill your cat.
onedriver-0.14.1/cgo-helper.sh 0000775 0000000 0000000 00000000756 14513675524 0016202 0 ustar 00root root 0000000 0000000 #!/usr/bin/env bash
# cgo cannot conditionally use different packages based on which system packages
# are installed, so this script is here to autodetect which webkit2gtk C headers
# we have access to.
if [ -n "$CGO_ENABLED" ] && [ "$CGO_ENABLED" -eq 0 ]; then
exit 0
fi
if pkg-config webkit2gtk-4.0; then
sed -i 's/webkit2gtk-4.1/webkit2gtk-4.0/g' fs/graph/oauth2_gtk.go
elif ! pkg-config webkit2gtk-4.1; then
echo "webkit2gtk development headers must be installed"
exit 1
fi
onedriver-0.14.1/cmd/ 0000775 0000000 0000000 00000000000 14513675524 0014351 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/cmd/common/ 0000775 0000000 0000000 00000000000 14513675524 0015641 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/cmd/common/common.go 0000664 0000000 0000000 00000001467 14513675524 0017470 0 ustar 00root root 0000000 0000000 // common functions used by both binaries
package common
import (
"fmt"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
const version = "0.14.1"
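// commit is the git commit this binary was built from. It is expected to be
// stamped in at build time (e.g. via -ldflags "-X") and may be empty for
// local builds.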
var commit string
// Version returns the current version string
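// (for example, "v0.14.1 0123abcd" when a commit hash was set at build time)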
func Version() string {
clen := 0
if len(commit) > 7 {
clen = 8
}
return fmt.Sprintf("v%s %s", version, commit[:clen])
}
// StringToLevel converts a string to a zerolog.Level for use with zerolog
func StringToLevel(input string) zerolog.Level {
level, err := zerolog.ParseLevel(input)
if err != nil {
log.Error().Err(err).Msg("Could not parse log level, defaulting to \"debug\"")
return zerolog.DebugLevel
}
return level
}
// LogLevels returns the available logging levels
func LogLevels() []string {
return []string{"trace", "debug", "info", "warn", "error", "fatal"}
}
onedriver-0.14.1/cmd/common/config.go 0000664 0000000 0000000 00000003277 14513675524 0017446 0 ustar 00root root 0000000 0000000 package common
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/imdario/mergo"
"github.com/jstaf/onedriver/fs/graph"
"github.com/jstaf/onedriver/ui"
"github.com/rs/zerolog/log"
yaml "gopkg.in/yaml.v3"
)
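// Config stores the configurable options for onedriver, as read from its YAML
// config file.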
type Config struct {
CacheDir string `yaml:"cacheDir"`
LogLevel string `yaml:"log"`
graph.AuthConfig `yaml:"auth"`
}
// DefaultConfigPath returns the default config location for onedriver
func DefaultConfigPath() string {
confDir, err := os.UserConfigDir()
if err != nil {
log.Error().Err(err).Msg("Could not determine configuration directory.")
}
return filepath.Join(confDir, "onedriver/config.yml")
}
// LoadConfig is the primary way of loading onedriver's config
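// As an illustration, a config file using the keys from the struct tags above
// might look like this (values here are only examples):
//
//	cacheDir: ~/.cache/onedriver
//	log: warn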
func LoadConfig(path string) *Config {
xdgCacheDir, _ := os.UserCacheDir()
defaults := Config{
CacheDir: filepath.Join(xdgCacheDir, "onedriver"),
LogLevel: "debug",
}
conf, err := ioutil.ReadFile(path)
if err != nil {
log.Warn().
Err(err).
Str("path", path).
Msg("Configuration file not found, using defaults.")
return &defaults
}
config := &Config{}
if err = yaml.Unmarshal(conf, config); err != nil {
log.Error().
Err(err).
Str("path", path).
Msg("Could not parse configuration file, using defaults.")
}
if err = mergo.Merge(config, defaults); err != nil {
log.Error().
Err(err).
Str("path", path).
Msg("Could not merge configuration file with defaults, using defaults only.")
}
config.CacheDir = ui.UnescapeHome(config.CacheDir)
return config
}
// WriteConfig writes the config to a file at the given path
func (c Config) WriteConfig(path string) {
out, err := yaml.Marshal(c)
if err != nil {
log.Error().Err(err).Msg("Could not marshal config!")
}
ioutil.WriteFile(path, out, 0600)
}
onedriver-0.14.1/cmd/common/config_test.go 0000664 0000000 0000000 00000001767 14513675524 0020507 0 ustar 00root root 0000000 0000000 package common
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
const configTestDir = "pkg/resources/test"
// We should load config correctly.
func TestLoadConfig(t *testing.T) {
t.Parallel()
conf := LoadConfig(filepath.Join(configTestDir, "config-test.yml"))
home, _ := os.UserHomeDir()
assert.Equal(t, filepath.Join(home, "somewhere/else"), conf.CacheDir)
assert.Equal(t, "warn", conf.LogLevel)
}
func TestConfigMerge(t *testing.T) {
t.Parallel()
conf := LoadConfig(filepath.Join(configTestDir, "config-test-merge.yml"))
assert.Equal(t, "debug", conf.LogLevel)
assert.Equal(t, "/some/directory", conf.CacheDir)
}
// We should come up with the defaults if there is no config file.
func TestLoadNonexistentConfig(t *testing.T) {
t.Parallel()
conf := LoadConfig(filepath.Join(configTestDir, "does-not-exist.yml"))
home, _ := os.UserHomeDir()
assert.Equal(t, filepath.Join(home, ".cache/onedriver"), conf.CacheDir)
assert.Equal(t, "debug", conf.LogLevel)
}
onedriver-0.14.1/cmd/common/setup_test.go 0000664 0000000 0000000 00000000610 14513675524 0020364 0 ustar 00root root 0000000 0000000 package common
import (
"os"
"testing"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func TestMain(m *testing.M) {
os.Chdir("../..")
f, _ := os.OpenFile("fusefs_tests.log", os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644)
zerolog.SetGlobalLevel(zerolog.TraceLevel)
log.Logger = log.Output(zerolog.ConsoleWriter{Out: f, TimeFormat: "15:04:05"})
defer f.Close()
os.Exit(m.Run())
}
onedriver-0.14.1/cmd/onedriver-launcher/ 0000775 0000000 0000000 00000000000 14513675524 0020145 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/cmd/onedriver-launcher/main.go 0000664 0000000 0000000 00000033303 14513675524 0021422 0 ustar 00root root 0000000 0000000 package main
/*
#cgo linux pkg-config: gtk+-3.0
#include <gtk/gtk.h>
#include <stdlib.h>
*/
import "C"
import (
"fmt"
"os"
"path/filepath"
"unsafe"
"github.com/coreos/go-systemd/v22/unit"
"github.com/gotk3/gotk3/glib"
"github.com/gotk3/gotk3/gtk"
"github.com/jstaf/onedriver/cmd/common"
"github.com/jstaf/onedriver/ui"
"github.com/jstaf/onedriver/ui/systemd"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
flag "github.com/spf13/pflag"
)
func usage() {
fmt.Printf(`onedriver-launcher - Manage and configure onedriver mountpoints
Usage: onedriver-launcher [options]
Valid options:
`)
flag.PrintDefaults()
}
func main() {
logLevel := flag.StringP("log", "l", "",
"Set logging level/verbosity for the filesystem. "+
"Can be one of: fatal, error, warn, info, debug, trace")
cacheDir := flag.StringP("cache-dir", "c", "",
"Change the default cache directory used by onedriver. "+
"Will be created if it does not already exist.")
configPath := flag.StringP("config-file", "f", common.DefaultConfigPath(),
"A YAML-formatted configuration file used by onedriver.")
versionFlag := flag.BoolP("version", "v", false, "Display program version.")
help := flag.BoolP("help", "h", false, "Displays this help message.")
flag.Usage = usage
flag.Parse()
if *help {
flag.Usage()
os.Exit(0)
}
if *versionFlag {
fmt.Println("onedriver-launcher", common.Version())
os.Exit(0)
}
// command line options override config options
config := common.LoadConfig(*configPath)
if *cacheDir != "" {
config.CacheDir = *cacheDir
}
if *logLevel != "" {
config.LogLevel = *logLevel
}
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05"})
zerolog.SetGlobalLevel(common.StringToLevel(config.LogLevel))
log.Info().Msgf("onedriver-launcher %s", common.Version())
app, err := gtk.ApplicationNew("com.github.jstaf.onedriver", glib.APPLICATION_FLAGS_NONE)
if err != nil {
log.Fatal().Err(err).Msg("Could not create application.")
}
app.Connect("activate", func(application *gtk.Application) {
activateCallback(application, config, *configPath)
})
os.Exit(app.Run(nil))
}
// activateCallback is what actually sets up the application
func activateCallback(app *gtk.Application, config *common.Config, configPath string) {
window, _ := gtk.ApplicationWindowNew(app)
window.SetDefaultSize(550, 400)
header, _ := gtk.HeaderBarNew()
header.SetShowCloseButton(true)
header.SetTitle("onedriver")
window.SetTitlebar(header)
listbox, _ := gtk.ListBoxNew()
window.Add(listbox)
switches := make(map[string]*gtk.Switch)
mountpointBtn, _ := gtk.ButtonNewFromIconName("list-add-symbolic", gtk.ICON_SIZE_BUTTON)
mountpointBtn.SetTooltipText("Add a new OneDrive account.")
mountpointBtn.Connect("clicked", func(button *gtk.Button) {
mount := ui.DirChooser("Select a mountpoint")
if !ui.MountpointIsValid(mount) {
log.Error().Str("mountpoint", mount).
Msg("Mountpoint was not valid (or user cancelled the operation). " +
"Mountpoint must be an empty directory.")
if mount != "" {
ui.Dialog(
"Mountpoint was not valid, mountpoint must be an empty directory "+
"(there might be hidden files).", gtk.MESSAGE_ERROR, window)
}
return
}
escapedMount := unit.UnitNamePathEscape(mount)
systemdUnit := systemd.TemplateUnit(systemd.OnedriverServiceTemplate, escapedMount)
log.Info().
Str("mountpoint", mount).
Str("systemdUnit", systemdUnit).
Msg("Creating mountpoint.")
if err := systemd.UnitSetActive(systemdUnit, true); err != nil {
log.Error().Err(err).Msg("Failed to start unit.")
return
}
row, sw := newMountRow(*config, mount)
switches[mount] = sw
listbox.Insert(row, -1)
go xdgOpenDir(mount)
})
header.PackStart(mountpointBtn)
// create a menubutton and assign a popover menu
menuBtn, _ := gtk.MenuButtonNew()
icon, _ := gtk.ImageNewFromIconName("open-menu-symbolic", gtk.ICON_SIZE_BUTTON)
menuBtn.SetImage(icon)
popover, _ := gtk.PopoverNew(menuBtn)
menuBtn.SetPopover(popover)
popover.SetBorderWidth(8)
// add buttons to menu
popoverBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, 5)
settings, _ := gtk.ModelButtonNew()
settings.SetLabel("Settings")
settings.Connect("clicked", func(button *gtk.ModelButton) {
newSettingsWindow(config, configPath)
})
popoverBox.PackStart(settings, false, true, 0)
// print version and link to repo
about, _ := gtk.ModelButtonNew()
about.SetLabel("About")
about.Connect("clicked", func(button *gtk.ModelButton) {
aboutDialog, _ := gtk.AboutDialogNew()
aboutDialog.SetAuthors([]string{"Jeff Stafford", "https://github.com/jstaf"})
aboutDialog.SetWebsite("https://github.com/jstaf/onedriver")
aboutDialog.SetWebsiteLabel("github.com/jstaf/onedriver")
aboutDialog.SetVersion(fmt.Sprintf("onedriver %s", common.Version()))
aboutDialog.SetLicenseType(gtk.LICENSE_GPL_3_0)
logo, err := gtk.ImageNewFromFile("/usr/share/icons/onedriver/onedriver-128.png")
if err != nil {
log.Error().Err(err).Msg("Could not find logo.")
} else {
aboutDialog.SetLogo(logo.GetPixbuf())
}
aboutDialog.Run()
})
popoverBox.PackStart(about, false, true, 0)
popoverBox.ShowAll()
popover.Add(popoverBox)
popover.SetPosition(gtk.POS_BOTTOM)
header.PackEnd(menuBtn)
mounts := ui.GetKnownMounts(config.CacheDir)
for _, mount := range mounts {
mount = unit.UnitNamePathUnescape(mount)
log.Info().Str("mount", mount).Msg("Found existing mount.")
row, sw := newMountRow(*config, mount)
switches[mount] = sw
listbox.Insert(row, -1)
}
listbox.Connect("row-activated", func() {
row := listbox.GetSelectedRow()
mount, _ := row.GetName()
unitName := systemd.TemplateUnit(systemd.OnedriverServiceTemplate,
unit.UnitNamePathEscape(mount))
log.Debug().
Str("mount", mount).
Str("unit", unitName).
Str("signal", "row-activated").
Msg("")
active, _ := systemd.UnitIsActive(unitName)
if !active {
err := systemd.UnitSetActive(unitName, true)
if err != nil {
log.Error().
Err(err).
Str("unit", unitName).
Msg("Could not set unit state to active.")
}
}
switches[mount].SetActive(true)
go xdgOpenDir(mount)
})
window.ShowAll()
}
// xdgOpenDir opens a folder in the user's default file browser.
// Should be invoked as a goroutine to not block the main app.
func xdgOpenDir(mount string) {
log.Debug().Str("dir", mount).Msg("Opening directory.")
if mount == "" || !ui.PollUntilAvail(mount, -1) {
log.Error().
Str("dir", mount).
Msg("Either directory was invalid or exceeded timeout waiting for fs to become available.")
return
}
cURI := C.CString("file://" + mount)
C.g_app_info_launch_default_for_uri(cURI, nil, nil)
C.free(unsafe.Pointer(cURI))
}
// newMountRow constructs a new ListBoxRow with the controls for an individual mountpoint.
// mount is the path to the new mountpoint.
func newMountRow(config common.Config, mount string) (*gtk.ListBoxRow, *gtk.Switch) {
row, _ := gtk.ListBoxRowNew()
row.SetSelectable(true)
box, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, 5)
row.Add(box)
escapedMount := unit.UnitNamePathEscape(mount)
unitName := systemd.TemplateUnit(systemd.OnedriverServiceTemplate, escapedMount)
var label *gtk.Label
tildePath := ui.EscapeHome(mount)
accountName, err := ui.GetAccountName(config.CacheDir, escapedMount)
if err != nil {
log.Error().
Err(err).
Str("mountpoint", mount).
Msg("Could not determine acccount name.")
label, _ = gtk.LabelNew(tildePath)
} else {
label, _ = gtk.LabelNew("")
		label.SetMarkup(fmt.Sprintf("%s <span style=\"italic\">(%s)</span> ",
			accountName, tildePath,
		))
}
box.PackStart(label, false, false, 5)
// create a button to delete the mountpoint
deleteMountpointBtn, _ := gtk.ButtonNewFromIconName("user-trash-symbolic", gtk.ICON_SIZE_BUTTON)
deleteMountpointBtn.SetTooltipText("Remove OneDrive account from local computer")
deleteMountpointBtn.Connect("clicked", func() {
log.Trace().
Str("signal", "clicked").
Str("mount", mount).
Str("unitName", unitName).
Msg("Request to delete mount.")
if ui.CancelDialog("Remove mountpoint?", nil) {
log.Info().
Str("signal", "clicked").
Str("mount", mount).
Str("unitName", unitName).
Msg("Deleting mount.")
systemd.UnitSetEnabled(unitName, false)
systemd.UnitSetActive(unitName, false)
cachedir, _ := os.UserCacheDir()
os.RemoveAll(fmt.Sprintf("%s/onedriver/%s/", cachedir, escapedMount))
row.Destroy()
}
})
box.PackEnd(deleteMountpointBtn, false, false, 0)
// create a button to enable/disable the mountpoint
unitEnabledBtn, _ := gtk.ToggleButtonNew()
enabledImg, _ := gtk.ImageNewFromIconName("object-select-symbolic", gtk.ICON_SIZE_BUTTON)
unitEnabledBtn.SetImage(enabledImg)
unitEnabledBtn.SetTooltipText("Start mountpoint on login")
enabled, err := systemd.UnitIsEnabled(unitName)
if err == nil {
unitEnabledBtn.SetActive(enabled)
} else {
log.Error().Err(err).Msg("Error checking unit enabled state.")
}
unitEnabledBtn.Connect("toggled", func() {
log.Info().
Str("signal", "toggled").
Str("mount", mount).
Str("unitName", unitName).
Bool("enabled", unitEnabledBtn.GetActive()).
Msg("Changing systemd unit enabled state.")
err := systemd.UnitSetEnabled(unitName, unitEnabledBtn.GetActive())
if err != nil {
log.Error().
Err(err).
Str("unit", unitName).
Msg("Could not change systemd unit enabled state.")
}
})
box.PackEnd(unitEnabledBtn, false, false, 0)
// a switch to start/stop the mountpoint
mountToggle, _ := gtk.SwitchNew()
active, err := systemd.UnitIsActive(unitName)
if err == nil {
mountToggle.SetActive(active)
} else {
log.Error().Err(err).Msg("Error checking unit active state.")
}
mountToggle.SetTooltipText("Mount or unmount selected OneDrive account")
mountToggle.SetVAlign(gtk.ALIGN_CENTER)
mountToggle.Connect("state-set", func() {
log.Info().
Str("signal", "state-set").
Str("mount", mount).
Str("unitName", unitName).
Bool("active", mountToggle.GetActive()).
Msg("Changing systemd unit active state.")
err := systemd.UnitSetActive(unitName, mountToggle.GetActive())
if err != nil {
log.Error().
Err(err).
Str("unit", unitName).
Msg("Could not change systemd unit active state.")
}
})
box.PackEnd(mountToggle, false, false, 0)
// name is used by "row-activated" callback
row.SetName(mount)
row.ShowAll()
return row, mountToggle
}
func newSettingsWindow(config *common.Config, configPath string) {
const offset = 15
settingsWindow, _ := gtk.WindowNew(gtk.WINDOW_TOPLEVEL)
settingsWindow.SetResizable(false)
settingsWindow.SetTitle("Settings")
// log level settings
settingsRowLog, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, offset)
logLevelLabel, _ := gtk.LabelNew("Log level")
settingsRowLog.PackStart(logLevelLabel, false, false, 0)
logLevelSelector, _ := gtk.ComboBoxTextNew()
for i, entry := range common.LogLevels() {
logLevelSelector.AppendText(entry)
if entry == config.LogLevel {
logLevelSelector.SetActive(i)
}
}
logLevelSelector.Connect("changed", func(box *gtk.ComboBoxText) {
config.LogLevel = box.GetActiveText()
log.Debug().
Str("newLevel", config.LogLevel).
Msg("Log level changed.")
zerolog.SetGlobalLevel(common.StringToLevel(config.LogLevel))
config.WriteConfig(configPath)
})
settingsRowLog.PackEnd(logLevelSelector, false, false, 0)
// cache dir settings
settingsRowCacheDir, _ := gtk.BoxNew(gtk.ORIENTATION_HORIZONTAL, offset)
cacheDirLabel, _ := gtk.LabelNew("Cache directory")
settingsRowCacheDir.PackStart(cacheDirLabel, false, false, 0)
cacheDirPicker, _ := gtk.ButtonNew()
cacheDirPicker.SetLabel(ui.EscapeHome(config.CacheDir))
cacheDirPicker.SetSizeRequest(200, 0)
cacheDirPicker.Connect("clicked", func(button *gtk.Button) {
oldPath, _ := button.GetLabel()
oldPath = ui.UnescapeHome(oldPath)
path := ui.DirChooser("Select an empty directory to use for storage")
if !ui.CancelDialog("Remount all drives?", settingsWindow) {
return
}
log.Warn().
Str("oldPath", oldPath).
Str("newPath", path).
Msg("All active drives will be remounted to move cache directory.")
// actually perform the stop+move op
isMounted := make([]string, 0)
for _, mount := range ui.GetKnownMounts(oldPath) {
unitName := systemd.TemplateUnit(systemd.OnedriverServiceTemplate, mount)
log.Info().
Str("mount", mount).
Str("unit", unitName).
Msg("Disabling mount.")
if mounted, _ := systemd.UnitIsActive(unitName); mounted {
isMounted = append(isMounted, unitName)
}
err := systemd.UnitSetActive(unitName, false)
if err != nil {
ui.Dialog("Could not disable mount: "+err.Error(),
gtk.MESSAGE_ERROR, settingsWindow)
log.Error().
Err(err).
Str("mount", mount).
Str("unit", unitName).
Msg("Could not disable mount.")
return
}
err = os.Rename(filepath.Join(oldPath, mount), filepath.Join(path, mount))
if err != nil {
ui.Dialog("Could not move cache for mount: "+err.Error(),
gtk.MESSAGE_ERROR, settingsWindow)
log.Error().
Err(err).
Str("mount", mount).
Str("unit", unitName).
Msg("Could not move cache for mount.")
return
}
}
// remount drives that were mounted before
for _, unitName := range isMounted {
err := systemd.UnitSetActive(unitName, true)
if err != nil {
log.Error().
Err(err).
Str("unit", unitName).
Msg("Failed to restart unit.")
}
}
// all done
config.CacheDir = path
config.WriteConfig(configPath)
button.SetLabel(path)
})
settingsRowCacheDir.PackEnd(cacheDirPicker, false, false, 0)
// assemble rows
settingsWindowBox, _ := gtk.BoxNew(gtk.ORIENTATION_VERTICAL, offset)
settingsWindowBox.SetBorderWidth(offset)
settingsWindowBox.PackStart(settingsRowLog, true, true, 0)
settingsWindowBox.PackStart(settingsRowCacheDir, true, true, 0)
settingsWindow.Add(settingsWindowBox)
settingsWindow.ShowAll()
}
onedriver-0.14.1/cmd/onedriver/ 0000775 0000000 0000000 00000000000 14513675524 0016346 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/cmd/onedriver/main.go 0000664 0000000 0000000 00000013475 14513675524 0017633 0 ustar 00root root 0000000 0000000 package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/signal"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/coreos/go-systemd/v22/unit"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/cmd/common"
"github.com/jstaf/onedriver/fs"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
flag "github.com/spf13/pflag"
)
func usage() {
fmt.Printf(`onedriver - A Linux client for Microsoft OneDrive.
This program will mount your OneDrive account as a Linux filesystem at the
specified mountpoint. Note that this is not a sync client - files are only
fetched on-demand and cached locally. Only files you actually use will be
downloaded. While offline, the filesystem will be read-only until
connectivity is re-established.
Usage: onedriver [options]
Valid options:
`)
flag.PrintDefaults()
}
func main() {
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05"})
// setup cli parsing
authOnly := flag.BoolP("auth-only", "a", false,
"Authenticate to OneDrive and then exit.")
headless := flag.BoolP("no-browser", "n", false,
"This disables launching the built-in web browser during authentication. "+
"Follow the instructions in the terminal to authenticate to OneDrive.")
configPath := flag.StringP("config-file", "f", common.DefaultConfigPath(),
"A YAML-formatted configuration file used by onedriver.")
logLevel := flag.StringP("log", "l", "",
"Set logging level/verbosity for the filesystem. "+
"Can be one of: fatal, error, warn, info, debug, trace")
cacheDir := flag.StringP("cache-dir", "c", "",
"Change the default cache directory used by onedriver. "+
"Will be created if the path does not already exist.")
wipeCache := flag.BoolP("wipe-cache", "w", false,
"Delete the existing onedriver cache directory and then exit. "+
"This is equivalent to resetting the program.")
versionFlag := flag.BoolP("version", "v", false, "Display program version.")
debugOn := flag.BoolP("debug", "d", false, "Enable FUSE debug logging. "+
"This logs communication between onedriver and the kernel.")
help := flag.BoolP("help", "h", false, "Displays this help message.")
flag.Usage = usage
flag.Parse()
if *help {
flag.Usage()
os.Exit(0)
}
if *versionFlag {
fmt.Println("onedriver", common.Version())
os.Exit(0)
}
config := common.LoadConfig(*configPath)
// command line options override config options
if *cacheDir != "" {
config.CacheDir = *cacheDir
}
if *logLevel != "" {
config.LogLevel = *logLevel
}
zerolog.SetGlobalLevel(common.StringToLevel(config.LogLevel))
// wipe cache if desired
if *wipeCache {
log.Info().Str("path", config.CacheDir).Msg("Removing cache.")
os.RemoveAll(config.CacheDir)
os.Exit(0)
}
// determine and validate mountpoint
if len(flag.Args()) == 0 {
flag.Usage()
fmt.Fprintf(os.Stderr, "\nNo mountpoint provided, exiting.\n")
os.Exit(1)
}
mountpoint := flag.Arg(0)
st, err := os.Stat(mountpoint)
if err != nil || !st.IsDir() {
log.Fatal().
Str("mountpoint", mountpoint).
Msg("Mountpoint did not exist or was not a directory.")
}
if res, _ := ioutil.ReadDir(mountpoint); len(res) > 0 {
log.Fatal().Str("mountpoint", mountpoint).Msg("Mountpoint must be empty.")
}
// compute cache name as systemd would
absMountPath, _ := filepath.Abs(mountpoint)
cachePath := filepath.Join(config.CacheDir, unit.UnitNamePathEscape(absMountPath))
// authenticate/re-authenticate if necessary
os.MkdirAll(cachePath, 0700)
authPath := filepath.Join(cachePath, "auth_tokens.json")
if *authOnly {
os.Remove(authPath)
graph.Authenticate(config.AuthConfig, authPath, *headless)
os.Exit(0)
}
// create the filesystem
log.Info().Msgf("onedriver %s", common.Version())
auth := graph.Authenticate(config.AuthConfig, authPath, *headless)
filesystem := fs.NewFilesystem(auth, cachePath)
go filesystem.DeltaLoop(30 * time.Second)
xdgVolumeInfo(filesystem, auth)
server, err := fuse.NewServer(filesystem, mountpoint, &fuse.MountOptions{
Name: "onedriver",
FsName: "onedriver",
DisableXAttrs: true,
MaxBackground: 1024,
Debug: *debugOn,
})
if err != nil {
log.Fatal().Err(err).Msgf("Mount failed. Is the mountpoint already in use? "+
"(Try running \"fusermount -uz %s\")\n", mountpoint)
}
// setup signal handler for graceful unmount on signals like sigint
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go fs.UnmountHandler(sigChan, server)
// serve filesystem
log.Info().
Str("cachePath", cachePath).
Str("mountpoint", absMountPath).
Msg("Serving filesystem.")
server.Serve()
}
// xdgVolumeInfo creates .xdg-volume-info for a nice little onedrive logo in the
// corner of the mountpoint and shows the account name in the nautilus sidebar
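// The generated file looks roughly like the following (Name comes from the
// account's userPrincipalName; the example value here is hypothetical):
//
//	[Volume Info]
//	Name=user@example.com
//	IconFile=/usr/share/icons/onedriver/onedriver.png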
func xdgVolumeInfo(filesystem *fs.Filesystem, auth *graph.Auth) {
if child, _ := filesystem.GetPath("/.xdg-volume-info", auth); child != nil {
return
}
log.Info().Msg("Creating .xdg-volume-info")
user, err := graph.GetUser(auth)
if err != nil {
log.Error().Err(err).Msg("Could not create .xdg-volume-info")
return
}
xdgVolumeInfo := fmt.Sprintf("[Volume Info]\nName=%s\n", user.UserPrincipalName)
if _, err := os.Stat("/usr/share/icons/onedriver/onedriver.png"); err == nil {
xdgVolumeInfo += "IconFile=/usr/share/icons/onedriver/onedriver.png\n"
}
// just upload directly and shove it in the cache
// (since the fs isn't mounted yet)
resp, err := graph.Put(
graph.ResourcePath("/.xdg-volume-info")+":/content",
auth,
strings.NewReader(xdgVolumeInfo),
)
if err != nil {
log.Error().Err(err).Msg("Failed to write .xdg-volume-info")
}
root, _ := filesystem.GetPath("/", auth) // cannot fail
inode := fs.NewInode(".xdg-volume-info", 0644, root)
if json.Unmarshal(resp, &inode) == nil {
filesystem.InsertID(inode.ID(), inode)
}
}
onedriver-0.14.1/curl-graph.sh 0000775 0000000 0000000 00000001303 14513675524 0016206 0 ustar 00root root 0000000 0000000 #!/usr/bin/env bash
if [ -z "$1" ] || [ "$1" == "--help" ] || [ "$1" == "-h" ]; then
echo "curl-graph.sh is a dev tool useful for exploring the Microsoft Graph API via curl."
echo
echo "$(tput bold)Usage:$(tput sgr0) ./curl-graph.sh [auth-token-file] api_endpoint [other curl options]"
echo "$(tput bold)Example:$(tput sgr0) ./curl-graph.sh ~/.cache/onedriver/auth_tokens.sh /me"
exit 0
fi
if [ -f "$1" ]; then
TOKEN=$(jq -r .access_token "$1")
ENDPOINT="$2"
shift 2
else
TOKEN=$(jq -r .access_token ~/.cache/onedriver/auth_tokens.json)
ENDPOINT="$1"
shift 1
fi
curl -s -H "Authorization: bearer $TOKEN" $@ "https://graph.microsoft.com/v1.0$ENDPOINT" | jq .
onedriver-0.14.1/fs/ 0000775 0000000 0000000 00000000000 14513675524 0014216 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/fs/cache.go 0000664 0000000 0000000 00000041053 14513675524 0015613 0 ustar 00root root 0000000 0000000 package fs
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog/log"
bolt "go.etcd.io/bbolt"
)
// Filesystem is the actual FUSE filesystem and uses the go analogy of the
// "low-level" FUSE API here:
// https://github.com/libfuse/libfuse/blob/master/include/fuse_lowlevel.h
type Filesystem struct {
fuse.RawFileSystem
metadata sync.Map
db *bolt.DB
content *LoopbackCache
auth *graph.Auth
root string // the id of the filesystem's root item
deltaLink string
uploads *UploadManager
sync.RWMutex
offline bool
lastNodeID uint64
inodes []string
// tracks currently open directories
opendirsM sync.RWMutex
opendirs map[uint64][]*Inode
}
// boltdb buckets
var (
bucketContent = []byte("content")
bucketMetadata = []byte("metadata")
bucketDelta = []byte("delta")
bucketVersion = []byte("version")
)
// so we can tell what format the db has
const fsVersion = "1"
// NewFilesystem creates a new filesystem
func NewFilesystem(auth *graph.Auth, cacheDir string) *Filesystem {
// prepare cache directory
if _, err := os.Stat(cacheDir); err != nil {
if err = os.Mkdir(cacheDir, 0700); err != nil {
log.Fatal().Err(err).Msg("Could not create cache directory.")
}
}
db, err := bolt.Open(
filepath.Join(cacheDir, "onedriver.db"),
0600,
&bolt.Options{Timeout: time.Second * 5},
)
if err != nil {
log.Fatal().Err(err).Msg("Could not open DB. Is it already in use by another mount?")
}
content := NewLoopbackCache(filepath.Join(cacheDir, "content"))
db.Update(func(tx *bolt.Tx) error {
tx.CreateBucketIfNotExists(bucketMetadata)
tx.CreateBucketIfNotExists(bucketDelta)
versionBucket, _ := tx.CreateBucketIfNotExists(bucketVersion)
// migrate old content bucket to the local filesystem
b := tx.Bucket(bucketContent)
if b != nil {
oldVersion := "0"
log.Info().
Str("oldVersion", oldVersion).
Str("version", fsVersion).
Msg("Migrating to new db format.")
err := b.ForEach(func(k []byte, v []byte) error {
log.Info().Bytes("key", k).Msg("Migrating file content.")
if err := content.Insert(string(k), v); err != nil {
return err
}
return b.Delete(k)
})
if err != nil {
log.Error().Err(err).Msg("Migration failed.")
}
tx.DeleteBucket(bucketContent)
log.Info().
Str("oldVersion", oldVersion).
Str("version", fsVersion).
Msg("Migrations complete.")
}
return versionBucket.Put([]byte("version"), []byte(fsVersion))
})
// ok, ready to start fs
fs := &Filesystem{
RawFileSystem: fuse.NewDefaultRawFileSystem(),
content: content,
db: db,
auth: auth,
opendirs: make(map[uint64][]*Inode),
}
rootItem, err := graph.GetItem("root", auth)
root := NewInodeDriveItem(rootItem)
if err != nil {
if graph.IsOffline(err) {
// no network, load from db if possible and go to read-only state
fs.Lock()
fs.offline = true
fs.Unlock()
if root = fs.GetID("root"); root == nil {
log.Fatal().Msg(
"We are offline and could not fetch the filesystem root item from disk.",
)
}
// when offline, we load the cache deltaLink from disk
fs.db.View(func(tx *bolt.Tx) error {
if link := tx.Bucket(bucketDelta).Get([]byte("deltaLink")); link != nil {
fs.deltaLink = string(link)
} else {
// Only reached if a previous online session never survived
// long enough to save its delta link. We explicitly disallow these
// types of startups as it's possible for things to get out of sync
// this way.
log.Fatal().Msg("Cannot perform an offline startup without a valid " +
"delta link from a previous session.")
}
return nil
})
} else {
log.Fatal().Err(err).Msg("Could not fetch root item of filesystem!")
}
}
// root inode is inode 1
fs.root = root.ID()
fs.InsertID(fs.root, root)
fs.uploads = NewUploadManager(2*time.Second, db, fs, auth)
if !fs.IsOffline() {
// .Trash-UID is used by "gio trash" for user trash, create it if it
// does not exist
trash := fmt.Sprintf(".Trash-%d", os.Getuid())
if child, _ := fs.GetChild(fs.root, trash, auth); child == nil {
item, err := graph.Mkdir(trash, fs.root, auth)
if err != nil {
log.Error().Err(err).
Msg("Could not create trash folder. " +
"Trashing items through the file browser may result in errors.")
} else {
fs.InsertID(item.ID, NewInodeDriveItem(item))
}
}
// using token=latest because we don't care about existing items - they'll
// be downloaded on-demand by the cache
fs.deltaLink = "/me/drive/root/delta?token=latest"
}
// deltaloop is started manually
return fs
}
// IsOffline returns whether or not the cache thinks it's offline.
func (f *Filesystem) IsOffline() bool {
f.RLock()
defer f.RUnlock()
return f.offline
}
// TranslateID returns the DriveItemID for a given NodeID
func (f *Filesystem) TranslateID(nodeID uint64) string {
f.RLock()
defer f.RUnlock()
if nodeID > f.lastNodeID || nodeID == 0 {
return ""
}
return f.inodes[nodeID-1]
}
// GetNodeID fetches the inode for a particular inode ID.
func (f *Filesystem) GetNodeID(nodeID uint64) *Inode {
id := f.TranslateID(nodeID)
if id == "" {
return nil
}
return f.GetID(id)
}
// InsertNodeID assigns a numeric inode ID used by the kernel if one is not
// already assigned.
func (f *Filesystem) InsertNodeID(inode *Inode) uint64 {
nodeID := inode.NodeID()
if nodeID == 0 {
// lock ordering is to satisfy deadlock detector
inode.Lock()
f.Lock()
f.lastNodeID++
f.inodes = append(f.inodes, inode.DriveItem.ID)
nodeID = f.lastNodeID
inode.nodeID = nodeID
f.Unlock()
inode.Unlock()
}
return nodeID
}
// GetID gets an inode from the cache by ID. No API fetching is performed.
// Result is nil if no inode is found.
func (f *Filesystem) GetID(id string) *Inode {
entry, exists := f.metadata.Load(id)
if !exists {
// we allow fetching from disk as a fallback while offline (and it's also
// necessary while transitioning from offline->online)
var found *Inode
f.db.View(func(tx *bolt.Tx) error {
data := tx.Bucket(bucketMetadata).Get([]byte(id))
var err error
if data != nil {
found, err = NewInodeJSON(data)
}
return err
})
if found != nil {
f.InsertNodeID(found)
f.metadata.Store(id, found) // move to memory for next time
}
return found
}
return entry.(*Inode)
}
// InsertID inserts a single item into the filesystem by ID and sets its parent
// using the Inode.Parent.ID, if set. Must be called after DeleteID, if being
// used to rename/move an item. This is the main way new Inodes are added to the
// filesystem. Returns the Inode's numeric NodeID.
func (f *Filesystem) InsertID(id string, inode *Inode) uint64 {
f.metadata.Store(id, inode)
nodeID := f.InsertNodeID(inode)
if id != inode.ID() {
// we update the inode IDs here in case they do not match/changed
inode.Lock()
inode.DriveItem.ID = id
inode.Unlock()
f.Lock()
if nodeID <= f.lastNodeID {
f.inodes[nodeID-1] = id
} else {
log.Error().
Uint64("nodeID", nodeID).
Uint64("lastNodeID", f.lastNodeID).
Msg("NodeID exceeded maximum node ID! Ignoring ID change.")
}
f.Unlock()
}
parentID := inode.ParentID()
if parentID == "" {
// root item, or parent not set
return nodeID
}
parent := f.GetID(parentID)
if parent == nil {
log.Error().
Str("parentID", parentID).
Str("childID", id).
Str("childName", inode.Name()).
Msg("Parent item could not be found when setting parent.")
return nodeID
}
// check if the item has already been added to the parent
// Lock order is super key here, must go parent->child or the deadlock
// detector screams at us.
parent.Lock()
defer parent.Unlock()
for _, child := range parent.children {
if child == id {
// exit early, child cannot be added twice
return nodeID
}
}
// add to parent
if inode.IsDir() {
parent.subdir++
}
parent.children = append(parent.children, id)
return nodeID
}
// InsertChild adds an item as a child of a specified parent ID.
func (f *Filesystem) InsertChild(parentID string, child *Inode) uint64 {
child.Lock()
// should already be set, just double-checking here.
child.DriveItem.Parent.ID = parentID
id := child.DriveItem.ID
child.Unlock()
return f.InsertID(id, child)
}
// DeleteID deletes an item from the cache, and removes it from its parent. Must
// be called before InsertID if being used to rename/move an item.
func (f *Filesystem) DeleteID(id string) {
if inode := f.GetID(id); inode != nil {
parent := f.GetID(inode.ParentID())
parent.Lock()
for i, childID := range parent.children {
if childID == id {
parent.children = append(parent.children[:i], parent.children[i+1:]...)
if inode.IsDir() {
parent.subdir--
}
break
}
}
parent.Unlock()
}
f.metadata.Delete(id)
f.uploads.CancelUpload(id)
}
// GetChild fetches a named child of an item. Wraps GetChildrenID.
func (f *Filesystem) GetChild(id string, name string, auth *graph.Auth) (*Inode, error) {
children, err := f.GetChildrenID(id, auth)
if err != nil {
return nil, err
}
for _, child := range children {
if strings.EqualFold(child.Name(), name) {
return child, nil
}
}
return nil, errors.New("child does not exist")
}
// GetChildrenID grabs all DriveItems that are the children of the given ID. If
// items are not found, they are fetched.
func (f *Filesystem) GetChildrenID(id string, auth *graph.Auth) (map[string]*Inode, error) {
// fetch item and catch common errors
inode := f.GetID(id)
children := make(map[string]*Inode)
if inode == nil {
log.Error().Str("id", id).Msg("Inode not found in cache")
return children, errors.New(id + " not found in cache")
} else if !inode.IsDir() {
// Normal files are treated as empty folders. This only gets called if
// we messed up and tried to get the children of a plain-old file.
log.Warn().
Str("id", id).
Str("path", inode.Path()).
Msg("Attepted to get children of ordinary file")
return children, nil
}
// If item.children is not nil, it means we have the item's children
// already and can fetch them directly from the cache
inode.RLock()
if inode.children != nil {
// can potentially have out-of-date child metadata if started offline, but since
// changes are disallowed while offline, the children will be back in sync after
// the first successful delta fetch (which also brings the fs back online)
for _, childID := range inode.children {
child := f.GetID(childID)
if child == nil {
// will be nil if deleted or never existed
continue
}
children[strings.ToLower(child.Name())] = child
}
inode.RUnlock()
return children, nil
}
inode.RUnlock()
// We haven't fetched the children for this item yet, get them from the server.
fetched, err := graph.GetItemChildren(id, auth)
if err != nil {
if graph.IsOffline(err) {
log.Warn().Str("id", id).
Msg("We are offline, and no children found in cache. " +
"Pretending there are no children.")
return children, nil
}
// something else happened besides being offline
return nil, err
}
inode.Lock()
inode.children = make([]string, 0)
for _, item := range fetched {
// we will always have an id after fetching from the server
child := NewInodeDriveItem(item)
f.InsertNodeID(child)
f.metadata.Store(child.DriveItem.ID, child)
// store in result map
children[strings.ToLower(child.Name())] = child
// store id in parent item and increment parents subdirectory count
inode.children = append(inode.children, child.DriveItem.ID)
if child.IsDir() {
inode.subdir++
}
}
inode.Unlock()
return children, nil
}
// GetChildrenPath grabs all DriveItems that are the children of the resource at
// the path. If items are not found, they are fetched.
func (f *Filesystem) GetChildrenPath(path string, auth *graph.Auth) (map[string]*Inode, error) {
inode, err := f.GetPath(path, auth)
if err != nil {
return make(map[string]*Inode), err
}
return f.GetChildrenID(inode.ID(), auth)
}
// GetPath fetches a given DriveItem in the cache, if any items along the way are
// not found, they are fetched.
func (f *Filesystem) GetPath(path string, auth *graph.Auth) (*Inode, error) {
lastID := f.root
if path == "/" {
return f.GetID(lastID), nil
}
// from the root directory, traverse the chain of items till we reach our
// target ID.
path = strings.TrimSuffix(strings.ToLower(path), "/")
split := strings.Split(path, "/")[1:] //omit leading "/"
var inode *Inode
for i := 0; i < len(split); i++ {
// fetches children
children, err := f.GetChildrenID(lastID, auth)
if err != nil {
return nil, err
}
		var exists bool // if we use ":=", inode is shadowed
inode, exists = children[split[i]]
if !exists {
// the item still doesn't exist after fetching from server. it
// doesn't exist
return nil, errors.New(strings.Join(split[:i+1], "/") +
" does not exist on server or in local cache")
}
lastID = inode.ID()
}
return inode, nil
}
// DeletePath an item from the cache by path. Must be called before Insert if
// being used to move/rename an item.
func (f *Filesystem) DeletePath(key string) {
inode, _ := f.GetPath(strings.ToLower(key), nil)
if inode != nil {
f.DeleteID(inode.ID())
}
}
// InsertPath lets us manually insert an item to the cache (like if it was
// created locally). Overwrites a cached item if present. Must be called after
// delete if being used to move/rename an item.
func (f *Filesystem) InsertPath(key string, auth *graph.Auth, inode *Inode) (uint64, error) {
key = strings.ToLower(key)
// set the item.Parent.ID properly if the item hasn't been in the cache
// before or is being moved.
parent, err := f.GetPath(filepath.Dir(key), auth)
if err != nil {
return 0, err
} else if parent == nil {
const errMsg string = "parent of key was nil"
log.Error().
Str("key", key).
Str("path", inode.Path()).
Msg(errMsg)
return 0, errors.New(errMsg)
}
// Coded this way to make sure locks are in the same order for the deadlock
// detector (lock ordering needs to be the same as InsertID: Parent->Child).
parentID := parent.ID()
inode.Lock()
inode.DriveItem.Parent.ID = parentID
id := inode.DriveItem.ID
inode.Unlock()
return f.InsertID(id, inode), nil
}
// MoveID moves an item to a new ID. Also responsible for handling the
// actual overwrite of the item's underlying DriveItem.ID field
func (f *Filesystem) MoveID(oldID string, newID string) error {
inode := f.GetID(oldID)
if inode == nil {
// It may have already been renamed. This is not an error. We assume
// that IDs will never collide. Re-perform the op if this is the case.
if inode = f.GetID(newID); inode == nil {
// nope, it just doesn't exist
return errors.New("Could not get item: " + oldID)
}
}
// need to rename the child under the parent
parent := f.GetID(inode.ParentID())
parent.Lock()
for i, child := range parent.children {
if child == oldID {
parent.children[i] = newID
break
}
}
parent.Unlock()
// now actually perform the metadata+content move
f.DeleteID(oldID)
f.InsertID(newID, inode)
if inode.IsDir() {
return nil
}
f.content.Move(oldID, newID)
return nil
}
// MovePath moves an item to a new position.
func (f *Filesystem) MovePath(oldParent, newParent, oldName, newName string, auth *graph.Auth) error {
inode, err := f.GetChild(oldParent, oldName, auth)
if err != nil {
return err
}
id := inode.ID()
f.DeleteID(id)
// this is the actual move op
inode.SetName(newName)
parent := f.GetID(newParent)
inode.Parent.ID = parent.DriveItem.ID
f.InsertID(id, inode)
return nil
}
// SerializeAll dumps all inode metadata currently in the cache to disk. This
// metadata is only used later if an item could not be found in memory AND the
// cache is offline. Old metadata is not removed, only overwritten (to avoid an
// offline session from wiping all metadata on a subsequent serialization).
func (f *Filesystem) SerializeAll() {
log.Debug().Msg("Serializing cache metadata to disk.")
allItems := make(map[string][]byte)
f.metadata.Range(func(k interface{}, v interface{}) bool {
// cannot occur within bolt transaction because acquiring the inode lock
// with AsJSON locks out other boltdb transactions
id := fmt.Sprint(k)
allItems[id] = v.(*Inode).AsJSON()
return true
})
/*
One transaction to serialize them all,
One transaction to find them,
One transaction to bring them all
and in the darkness write them.
*/
f.db.Batch(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketMetadata)
for k, v := range allItems {
b.Put([]byte(k), v)
if k == f.root {
// root item must be updated manually (since there's actually
// two copies)
b.Put([]byte("root"), v)
}
}
return nil
})
}
onedriver-0.14.1/fs/cache_test.go 0000664 0000000 0000000 00000003535 14513675524 0016655 0 ustar 00root root 0000000 0000000 // these tests are independent of the mounted fs
package fs
import (
"fmt"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestRootGet(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_root_get"))
root, err := cache.GetPath("/", auth)
require.NoError(t, err)
assert.Equal(t, "/", root.Path(), "Root path did not resolve correctly.")
}
func TestRootChildrenUpdate(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_root_children_update"))
children, err := cache.GetChildrenPath("/", auth)
require.NoError(t, err)
if _, exists := children["documents"]; !exists {
t.Fatal("Could not find documents folder.")
}
}
func TestSubdirGet(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_subdir_get"))
documents, err := cache.GetPath("/Documents", auth)
require.NoError(t, err)
assert.Equal(t, "Documents", documents.Name(), "Failed to fetch \"/Documents\".")
}
func TestSubdirChildrenUpdate(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_subdir_children_update"))
children, err := cache.GetChildrenPath("/Documents", auth)
require.NoError(t, err)
if _, exists := children["documents"]; exists {
fmt.Println("Documents directory found inside itself. " +
"Likely the cache did not traverse correctly.\n\nChildren:")
for key := range children {
fmt.Println(key)
}
t.FailNow()
}
}
func TestSamePointer(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_same_pointer"))
item, _ := cache.GetPath("/Documents", auth)
item2, _ := cache.GetPath("/Documents", auth)
if item != item2 {
t.Fatalf("Pointers to cached items do not match: %p != %p\n", item, item2)
}
assert.NotNil(t, item)
}
onedriver-0.14.1/fs/content_cache.go 0000664 0000000 0000000 00000005231 14513675524 0017343 0 ustar 00root root 0000000 0000000 package fs
import (
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sync"
)
// LoopbackCache stores the content for files under a folder as regular files
type LoopbackCache struct {
directory string
fds sync.Map
}
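// NewLoopbackCache creates a LoopbackCache rooted at the given directory,
// creating that directory (mode 0700) if it does not already exist.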
func NewLoopbackCache(directory string) *LoopbackCache {
os.Mkdir(directory, 0700)
return &LoopbackCache{
directory: directory,
fds: sync.Map{},
}
}
// contentPath returns the path for the given content file
func (l *LoopbackCache) contentPath(id string) string {
return filepath.Join(l.directory, id)
}
// Get reads a file's content from disk.
func (l *LoopbackCache) Get(id string) []byte {
content, _ := ioutil.ReadFile(l.contentPath(id))
return content
}
// Insert writes file content to disk in a single bulk insert.
func (l *LoopbackCache) Insert(id string, content []byte) error {
return ioutil.WriteFile(l.contentPath(id), content, 0600)
}
// InsertStream inserts a stream of data
func (l *LoopbackCache) InsertStream(id string, reader io.Reader) (int64, error) {
fd, err := l.Open(id)
if err != nil {
return 0, err
}
return io.Copy(fd, reader)
}
// Delete closes the fd AND deletes content from disk.
func (l *LoopbackCache) Delete(id string) error {
l.Close(id)
return os.Remove(l.contentPath(id))
}
// Move moves content from one ID to another
func (l *LoopbackCache) Move(oldID string, newID string) error {
return os.Rename(l.contentPath(oldID), l.contentPath(newID))
}
// IsOpen returns true if the file is already opened somewhere
func (l *LoopbackCache) IsOpen(id string) bool {
_, ok := l.fds.Load(id)
return ok
}
// HasContent is used to find if we have a file or not in cache (in any state)
func (l *LoopbackCache) HasContent(id string) bool {
// is it already open?
_, ok := l.fds.Load(id)
if ok {
return ok
}
// is it on disk?
_, err := os.Stat(l.contentPath(id))
return err == nil
}
// Open returns a filehandle for subsequent access
func (l *LoopbackCache) Open(id string) (*os.File, error) {
if fd, ok := l.fds.Load(id); ok {
// already opened, return existing fd
return fd.(*os.File), nil
}
fd, err := os.OpenFile(l.contentPath(id), os.O_CREATE|os.O_RDWR, 0600)
if err != nil {
return nil, err
}
// Since we explicitly want to store *os.Files, we need to prevent the Go
// GC from trying to be "helpful" and closing files for us behind the
// scenes.
// https://github.com/hanwen/go-fuse/issues/371#issuecomment-694799535
runtime.SetFinalizer(fd, nil)
l.fds.Store(id, fd)
return fd, nil
}
// Close closes the currently open fd
func (l *LoopbackCache) Close(id string) {
if fd, ok := l.fds.Load(id); ok {
file := fd.(*os.File)
file.Sync()
file.Close()
l.fds.Delete(id)
}
}
onedriver-0.14.1/fs/delta.go 0000664 0000000 0000000 00000016157 14513675524 0015650 0 ustar 00root root 0000000 0000000 package fs
import (
"encoding/json"
"errors"
"strings"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog/log"
bolt "go.etcd.io/bbolt"
)
// DeltaLoop polls the server for changes at the given interval and never
// returns, so it should be run as a goroutine.
func (f *Filesystem) DeltaLoop(interval time.Duration) {
log.Trace().Msg("Starting delta goroutine.")
for { // eva
// get deltas
log.Trace().Msg("Fetching deltas from server.")
pollSuccess := false
deltas := make(map[string]*graph.DriveItem)
for {
incoming, cont, err := f.pollDeltas(f.auth)
if err != nil {
// the only thing that should be able to bring the FS out
// of a read-only state is a successful delta call
log.Error().Err(err).
Msg("Error during delta fetch, marking fs as offline.")
f.Lock()
f.offline = true
f.Unlock()
break
}
for _, delta := range incoming {
// As per the API docs, the last delta received from the server
// for an item is the one we should use.
deltas[delta.ID] = delta
}
if !cont {
log.Info().Msgf("Fetched %d deltas.", len(deltas))
pollSuccess = true
break
}
}
// now apply deltas
secondPass := make([]string, 0)
for _, delta := range deltas {
err := f.applyDelta(delta)
// retry deletion of non-empty directories after all other deltas applied
if err != nil && err.Error() == "directory is non-empty" {
secondPass = append(secondPass, delta.ID)
}
}
for _, id := range secondPass {
// failures should explicitly be ignored the second time around as per docs
f.applyDelta(deltas[id])
}
if !f.IsOffline() {
f.SerializeAll()
}
if pollSuccess {
f.Lock()
if f.offline {
log.Info().Msg("Delta fetch success, marking fs as online.")
}
f.offline = false
f.Unlock()
f.db.Batch(func(tx *bolt.Tx) error {
return tx.Bucket(bucketDelta).Put([]byte("deltaLink"), []byte(f.deltaLink))
})
// wait until next interval
time.Sleep(interval)
} else {
// shortened duration while offline
time.Sleep(2 * time.Second)
}
}
}
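// deltaResponse is the JSON format of a single page of results from the Graph
// delta endpoint: the changed items themselves, plus a link to either the next
// page or the delta link to use for the next polling cycle.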
type deltaResponse struct {
NextLink string `json:"@odata.nextLink,omitempty"`
DeltaLink string `json:"@odata.deltaLink,omitempty"`
Values []*graph.DriveItem `json:"value,omitempty"`
}
// Polls the delta endpoint and returns deltas + whether or not to continue
// polling. Does not perform deduplication. Note that changes from the local
// client will actually appear as deltas from the server (there is no
// distinction between local and remote changes from the server's perspective,
// everything is a delta, regardless of where it came from).
func (f *Filesystem) pollDeltas(auth *graph.Auth) ([]*graph.DriveItem, bool, error) {
resp, err := graph.Get(f.deltaLink, auth)
if err != nil {
return make([]*graph.DriveItem, 0), false, err
}
page := deltaResponse{}
json.Unmarshal(resp, &page)
// If the server does not provide a `@odata.nextLink` item, it means we've
// reached the end of this polling cycle and should not continue until the
// next poll interval.
if page.NextLink != "" {
f.deltaLink = strings.TrimPrefix(page.NextLink, graph.GraphURL)
return page.Values, true, nil
}
f.deltaLink = strings.TrimPrefix(page.DeltaLink, graph.GraphURL)
return page.Values, false, nil
}
// applyDelta diagnoses and applies a server-side change to our local state.
// Things we care about (present in the local cache):
// * Deleted items
// * Changed content remotely, but not locally
// * New items in a folder we have locally
func (f *Filesystem) applyDelta(delta *graph.DriveItem) error {
id := delta.ID
name := delta.Name
parentID := delta.Parent.ID
ctx := log.With().
Str("id", id).
Str("parentID", parentID).
Str("name", name).
Logger()
ctx.Debug().Msg("Applying delta")
// diagnose and act on what type of delta we're dealing with
// do we have it at all?
if parent := f.GetID(parentID); parent == nil {
// Nothing needs to be applied, item not in cache, so latest copy will
// be pulled down next time it's accessed.
ctx.Trace().
Str("delta", "skip").
Msg("Skipping delta, item's parent not in cache.")
return nil
}
local := f.GetID(id)
// was it deleted?
if delta.Deleted != nil {
if delta.IsDir() && local != nil && local.HasChildren() {
// from docs: you should only delete a folder locally if it is empty
// after syncing all the changes.
ctx.Warn().Str("delta", "delete").
Msg("Refusing delta deletion of non-empty folder as per API docs.")
return errors.New("directory is non-empty")
}
ctx.Info().Str("delta", "delete").
Msg("Applying server-side deletion of item.")
f.DeleteID(id)
return nil
}
// does the item exist locally? if not, add the delta to the cache under the
// appropriate parent
if local == nil {
// check if we don't have it here first
local, _ = f.GetChild(parentID, name, nil)
if local != nil {
localID := local.ID()
ctx.Info().
Str("localID", localID).
Msg("Local item already exists under different ID.")
if isLocalID(localID) {
if err := f.MoveID(localID, id); err != nil {
ctx.Error().
Str("localID", localID).
Err(err).
Msg("Could not move item to new, nonlocal ID!")
}
}
} else {
ctx.Info().Str("delta", "create").
Msg("Creating inode from delta.")
f.InsertChild(parentID, NewInodeDriveItem(delta))
return nil
}
}
// was the item moved?
localName := local.Name()
if local.ParentID() != parentID || local.Name() != name {
log.Info().
Str("parent", local.ParentID()).
Str("name", localName).
Str("newParent", parentID).
Str("newName", name).
Str("id", id).
Str("delta", "rename").
Msg("Applying server-side rename")
oldParentID := local.ParentID()
// local rename only
f.MovePath(oldParentID, parentID, localName, name, f.auth)
// do not return, there may be additional changes
}
// Finally, check if the content/metadata of the remote has changed.
// "Interesting" changes must be synced back to our local state without
// data loss or corruption. Currently the only thing the local filesystem
// actually modifies remotely is the actual file data, so we simply accept
// the remote metadata changes that do not deal with the file's content
// changing.
if delta.ModTimeUnix() > local.ModTime() && !delta.ETagIsMatch(local.ETag) {
sameContent := false
if !delta.IsDir() && delta.File != nil {
local.RLock()
sameContent = local.VerifyChecksum(delta.File.Hashes.QuickXorHash)
local.RUnlock()
}
if !sameContent {
//TODO check if local has changes and rename the server copy if so
ctx.Info().Str("delta", "overwrite").
Msg("Overwriting local item, no local changes to preserve.")
// update modtime, hashes, purge any local content in memory
local.Lock()
defer local.Unlock()
local.DriveItem.ModTime = delta.ModTime
local.DriveItem.Size = delta.Size
local.DriveItem.ETag = delta.ETag
// the rest of these are harmless when this is a directory
// as they will be null anyways
local.DriveItem.File = delta.File
local.hasChanges = false
return nil
}
}
ctx.Trace().Str("delta", "skip").Msg("Skipping, no changes relative to local state.")
return nil
}
onedriver-0.14.1/fs/delta_test.go 0000664 0000000 0000000 00000026352 14513675524 0016705 0 ustar 00root root 0000000 0000000 // Run tests to verify that we are syncing changes from the server.
package fs
import (
"bytes"
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// a helper function for use with tests
func (i *Inode) setContent(f *Filesystem, newContent []byte) {
i.DriveItem.Size = uint64(len(newContent))
now := time.Now()
i.DriveItem.ModTime = &now
f.content.Insert(i.ID(), newContent)
if i.DriveItem.File == nil {
i.DriveItem.File = &graph.File{}
}
i.DriveItem.File.Hashes.QuickXorHash = graph.QuickXORHash(&newContent)
}
// In this test, we create a directory through the API, and wait to see if
// the cache picks it up post-creation.
func TestDeltaMkdir(t *testing.T) {
t.Parallel()
parent, err := graph.GetItemPath("/onedriver_tests/delta", auth)
require.NoError(t, err)
// create the directory directly through the API and bypass the cache
_, err = graph.Mkdir("first", parent.ID, auth)
require.NoError(t, err)
fname := filepath.Join(DeltaDir, "first")
// give the delta thread time to fetch the item
assert.Eventuallyf(t, func() bool {
st, err := os.Stat(fname)
if err == nil {
if st.Mode().IsDir() {
return true
}
t.Fatalf("%s was not a directory", fname)
}
return false
}, retrySeconds, time.Second, "%s not found", fname)
}
// We create a directory through the cache, then delete through the API and see
// if the cache picks it up.
func TestDeltaRmdir(t *testing.T) {
t.Parallel()
fname := filepath.Join(DeltaDir, "delete_me")
require.NoError(t, os.Mkdir(fname, 0755))
item, err := graph.GetItemPath("/onedriver_tests/delta/delete_me", auth)
require.NoError(t, err)
require.NoError(t, graph.Remove(item.ID, auth))
// wait for delta sync
assert.Eventually(t, func() bool {
_, err := os.Stat(fname)
		return err != nil
}, retrySeconds, time.Second, "File deletion not picked up by client")
}
// Create a file locally, then rename it remotely and verify that the renamed
// file still has the correct content under the new parent.
func TestDeltaRename(t *testing.T) {
t.Parallel()
require.NoError(t, ioutil.WriteFile(
filepath.Join(DeltaDir, "delta_rename_start"),
[]byte("cheesecake"),
0644,
))
var item *graph.DriveItem
var err error
require.Eventually(t, func() bool {
item, err = graph.GetItemPath("/onedriver_tests/delta/delta_rename_start", auth)
return err == nil
	}, 10*time.Second, time.Second, "Could not prepare /onedriver_tests/delta/delta_rename_start")
inode := NewInodeDriveItem(item)
require.NoError(t, graph.Rename(inode.ID(), "delta_rename_end", inode.ParentID(), auth))
fpath := filepath.Join(DeltaDir, "delta_rename_end")
assert.Eventually(t, func() bool {
if _, err := os.Stat(fpath); err == nil {
content, err := ioutil.ReadFile(fpath)
require.NoError(t, err)
return bytes.Contains(content, []byte("cheesecake"))
}
return false
}, retrySeconds, time.Second, "Rename not detected by client.")
}
// Create a file locally, then move it on the server to a new directory. Check
// to see if the cache picks it up.
func TestDeltaMoveParent(t *testing.T) {
t.Parallel()
require.NoError(t, ioutil.WriteFile(
filepath.Join(DeltaDir, "delta_move_start"),
[]byte("carrotcake"),
0644,
))
time.Sleep(time.Second)
var item *graph.DriveItem
var err error
require.Eventually(t, func() bool {
item, err = graph.GetItemPath("/onedriver_tests/delta/delta_move_start", auth)
return err == nil
}, 10*time.Second, time.Second)
newParent, err := graph.GetItemPath("/onedriver_tests/", auth)
require.NoError(t, err)
require.NoError(t, graph.Rename(item.ID, "delta_rename_end", newParent.ID, auth))
fpath := filepath.Join(TestDir, "delta_rename_end")
assert.Eventually(t, func() bool {
if _, err := os.Stat(fpath); err == nil {
content, err := ioutil.ReadFile(fpath)
require.NoError(t, err)
return bytes.Contains(content, []byte("carrotcake"))
}
return false
}, retrySeconds, time.Second, "Rename not detected by client")
}
// Change the content remotely on the server, and verify it gets propagated
// to the client.
func TestDeltaContentChangeRemote(t *testing.T) {
t.Parallel()
require.NoError(t, ioutil.WriteFile(
filepath.Join(DeltaDir, "remote_content"),
[]byte("the cake is a lie"),
0644,
))
// change and upload it via the API
time.Sleep(time.Second * 10)
item, err := graph.GetItemPath("/onedriver_tests/delta/remote_content", auth)
inode := NewInodeDriveItem(item)
require.NoError(t, err)
newContent := []byte("because it has been changed remotely!")
inode.setContent(fs, newContent)
data := fs.content.Get(inode.ID())
session, err := NewUploadSession(inode, &data)
require.NoError(t, err)
require.NoError(t, session.Upload(auth))
time.Sleep(time.Second * 10)
body, _, _ := graph.GetItemContent(inode.ID(), auth)
if !bytes.Equal(body, newContent) {
t.Fatalf("Failed to upload test file. Remote content: \"%s\"", body)
}
var content []byte
assert.Eventuallyf(t, func() bool {
content, err = ioutil.ReadFile(filepath.Join(DeltaDir, "remote_content"))
require.NoError(t, err)
return bytes.Equal(content, newContent)
}, retrySeconds, time.Second,
"Failed to sync content to local machine. Got content: \"%s\". "+
"Wanted: \"because it has been changed remotely!\". "+
"Remote content: \"%s\".",
string(content), string(body),
)
}
// Change the content both on the server and the client and verify that the
// client data is preserved.
func TestDeltaContentChangeBoth(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_delta_content_change_both"))
inode := NewInode("both_content_changed.txt", 0644|fuse.S_IFREG, nil)
cache.InsertPath("/both_content_changed.txt", nil, inode)
original := []byte("initial content")
inode.setContent(cache, original)
// write to, but do not close the file to simulate an in-use local file
local := []byte("local write content")
_, status := cache.Write(
context.Background().Done(),
&fuse.WriteIn{
InHeader: fuse.InHeader{NodeId: inode.NodeID()},
Offset: 0,
Size: uint32(len(local)),
},
local,
)
if status != fuse.OK {
t.Fatal("Write failed")
}
// apply a fake delta to the local item
fakeDelta := inode.DriveItem
now := time.Now().Add(time.Second * 10)
fakeDelta.ModTime = &now
fakeDelta.Size = uint64(len(original))
fakeDelta.ETag = "sldfjlsdjflkdj"
fakeDelta.File.Hashes = graph.Hashes{
QuickXorHash: graph.QuickXORHash(&original),
}
// should do nothing
require.NoError(t, cache.applyDelta(&fakeDelta))
require.Equal(t, uint64(len(local)), inode.Size(), "Contents of open local file changed!")
// act as if the file is now flushed (these are the ops that would happen during
// a flush)
inode.DriveItem.File = &graph.File{}
fd, _ := cache.content.Open(inode.ID())
inode.DriveItem.File.Hashes.QuickXorHash = graph.QuickXORHashStream(fd)
cache.content.Close(inode.DriveItem.ID)
inode.hasChanges = false
// should now change the file
require.NoError(t, cache.applyDelta(&fakeDelta))
require.Equal(t, fakeDelta.Size, inode.Size(),
"Contents of local file was not changed after disabling local changes!")
}
// If we have local content in the local disk cache that doesn't match what the
// server has, Open() should pick this up and wipe it. Otherwise Open() could
// pick up an old version of a file from previous program startups and think
// it's current, which would erase the real, up-to-date server copy.
func TestDeltaBadContentInCache(t *testing.T) {
t.Parallel()
// write a file to the server and poll until it exists
require.NoError(t, ioutil.WriteFile(
filepath.Join(DeltaDir, "corrupted"),
[]byte("correct contents"),
0644,
))
var id string
require.Eventually(t, func() bool {
item, err := graph.GetItemPath("/onedriver_tests/delta/corrupted", auth)
if err == nil {
id = item.ID
return true
}
return false
}, retrySeconds, time.Second)
fs.content.Insert(id, []byte("wrong contents"))
contents, err := ioutil.ReadFile(filepath.Join(DeltaDir, "corrupted"))
require.NoError(t, err)
if bytes.HasPrefix(contents, []byte("wrong")) {
t.Fatalf("File contents were wrong! Got \"%s\", wanted \"correct contents\"",
string(contents))
}
}
// Check that folders are deleted only when empty after syncing the complete set of
// changes.
func TestDeltaFolderDeletion(t *testing.T) {
t.Parallel()
require.NoError(t, os.MkdirAll(filepath.Join(DeltaDir, "nested/directory"), 0755))
nested, err := graph.GetItemPath("/onedriver_tests/delta/nested", auth)
require.NoError(t, err)
require.NoError(t, graph.Remove(nested.ID, auth))
// now poll and wait for deletion
assert.Eventually(t, func() bool {
inodes, _ := ioutil.ReadDir(DeltaDir)
for _, inode := range inodes {
if inode.Name() == "nested" {
return false
}
}
return true
}, retrySeconds, time.Second, "\"nested/\" directory was not deleted.")
}
// We should only perform a delta deletion of a folder if it was nonempty
func TestDeltaFolderDeletionNonEmpty(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_delta_folder_deletion_nonempty"))
dir := NewInode("folder", 0755|fuse.S_IFDIR, nil)
file := NewInode("file", 0644|fuse.S_IFREG, nil)
cache.InsertPath("/folder", nil, dir)
cache.InsertPath("/folder/file", nil, file)
delta := &graph.DriveItem{
ID: dir.ID(),
Parent: &graph.DriveItemParent{ID: dir.ParentID()},
Deleted: &graph.Deleted{State: "softdeleted"},
Folder: &graph.Folder{},
}
err := cache.applyDelta(delta)
require.NotNil(t, cache.GetID(delta.ID), "Folder should still be present")
require.Error(t, err, "A delta deletion of a non-empty folder was not an error")
cache.DeletePath("/folder/file")
cache.applyDelta(delta)
assert.Nil(t, cache.GetID(delta.ID),
"Still found folder after emptying it first (the correct way).")
}
// Some programs like LibreOffice and WPS Office will have a fit if the
// modification times on their lockfiles are updated after they are written. This
// test verifies that the delta thread does not modify modification times if the
// content is unchanged.
func TestDeltaNoModTimeUpdate(t *testing.T) {
t.Parallel()
fname := filepath.Join(DeltaDir, "mod_time_update.txt")
require.NoError(t, ioutil.WriteFile(fname, []byte("a pretend lockfile"), 0644))
finfo, err := os.Stat(fname)
require.NoError(t, err)
mtimeOriginal := finfo.ModTime()
time.Sleep(15 * time.Second)
finfo, err = os.Stat(fname)
require.NoError(t, err)
mtimeNew := finfo.ModTime()
if !mtimeNew.Equal(mtimeOriginal) {
t.Fatalf(
"Modification time was updated even though the file did not change.\n"+
"Old mtime: %d, New mtime: %d\n", mtimeOriginal.Unix(), mtimeNew.Unix(),
)
}
}
// deltas can come back from the server missing hashes
// https://github.com/jstaf/onedriver/issues/111
func TestDeltaMissingHash(t *testing.T) {
t.Parallel()
cache := NewFilesystem(auth, filepath.Join(testDBLoc, "test_delta_missing_hash"))
file := NewInode("file", 0644|fuse.S_IFREG, nil)
cache.InsertPath("/folder", nil, file)
time.Sleep(time.Second)
now := time.Now()
delta := &graph.DriveItem{
ID: file.ID(),
Parent: &graph.DriveItemParent{ID: file.ParentID()},
ModTime: &now,
Size: 12345,
}
cache.applyDelta(delta)
// if we survive to here without a segfault, test passed
}
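// Illustrative sketch (hypothetical helper, not part of the upstream source):
// the nil-safety pattern the test above exercises. A delta may arrive without
// a File facet at all, so any hash comparison must guard against nil before
// dereferencing.
func hashOrEmpty(file *graph.File) string {
if file == nil {
return ""
}
return file.Hashes.QuickXorHash
}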
onedriver-0.14.1/fs/fs.go 0000664 0000000 0000000 00000054452 14513675524 0015167 0 ustar 00root root 0000000 0000000 package fs
import (
"io"
"math"
"os"
"path/filepath"
"regexp"
"strings"
"syscall"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog/log"
)
const timeout = time.Second
func (f *Filesystem) getInodeContent(i *Inode) *[]byte {
i.RLock()
defer i.RUnlock()
data := f.content.Get(i.DriveItem.ID)
return &data
}
// remoteID uploads a file to obtain a OneDrive ID if it doesn't already
// have one. This is necessary to avoid race conditions against uploads if the
// file has not already been uploaded.
func (f *Filesystem) remoteID(i *Inode) (string, error) {
if i.IsDir() {
// Directories are always created with an ID. (And this method is only
// really used for files anyways...)
return i.ID(), nil
}
originalID := i.ID()
if isLocalID(originalID) && f.auth.AccessToken != "" {
// perform a blocking upload of the item
data := f.getInodeContent(i)
session, err := NewUploadSession(i, data)
if err != nil {
return originalID, err
}
i.Lock()
name := i.DriveItem.Name
err = session.Upload(f.auth)
if err != nil {
i.Unlock()
if strings.Contains(err.Error(), "nameAlreadyExists") {
// A file with this name already exists on the server, get its ID and
// use that. This is probably the same file, but just got uploaded
// earlier.
children, err := graph.GetItemChildren(i.ParentID(), f.auth)
if err != nil {
return originalID, err
}
for _, child := range children {
if child.Name == name {
log.Info().
Str("name", name).
Str("originalID", originalID).
Str("newID", child.ID).
Msg("Exchanged ID.")
return child.ID, f.MoveID(originalID, child.ID)
}
}
}
// failed to obtain an ID, return whatever it was beforehand
return originalID, err
}
// we just successfully uploaded a copy, no need to do it again
i.hasChanges = false
i.DriveItem.ETag = session.ETag
i.Unlock()
// this is all we really wanted from this transaction
err = f.MoveID(originalID, session.ID)
log.Info().
Str("name", name).
Str("originalID", originalID).
Str("newID", session.ID).
Msg("Exchanged ID.")
return session.ID, err
}
return originalID, nil
}
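// Illustrative flow (hypothetical, not from the upstream source): a local-only
// inode is uploaded by remoteID and all bookkeeping is rekeyed to the
// server-assigned ID:
//
//   id, err := f.remoteID(inode) // uploads if isLocalID(inode.ID())
//   // on success: !isLocalID(id) and f.GetID(id) returns the same inode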
var disallowedRexp = regexp.MustCompile(`(?i)LPT[0-9]|COM[0-9]|_vti_|["*:<>?\/\\\|]`)
// isNameRestricted returns true if the name is disallowed according to the doc here:
// https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa
func isNameRestricted(name string) bool {
if strings.EqualFold(name, "CON") {
return true
}
if strings.EqualFold(name, "AUX") {
return true
}
if strings.EqualFold(name, "PRN") {
return true
}
if strings.EqualFold(name, "NUL") {
return true
}
if strings.EqualFold(name, ".lock") {
return true
}
if strings.EqualFold(name, "desktop.ini") {
return true
}
return disallowedRexp.FindStringIndex(name) != nil
}
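// Illustrative sketch (hypothetical helper, not part of the upstream source):
// sample inputs and the values isNameRestricted returns for them, written as
// a single boolean check for documentation purposes.
func isNameRestrictedExamples() bool {
return isNameRestricted("CON") && // reserved name, any case
isNameRestricted("_vti_bin") && // "_vti_" is disallowed anywhere in a name
isNameRestricted("bad:name.txt") && // ':' is a forbidden character
!isNameRestricted("a perfectly normal name.txt")
}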
// StatFs returns information about the filesystem. Mainly useful for checking
// quotas and storage limits.
func (f *Filesystem) StatFs(cancel <-chan struct{}, in *fuse.InHeader, out *fuse.StatfsOut) fuse.Status {
ctx := log.With().Str("op", "StatFs").Logger()
ctx.Debug().Msg("")
drive, err := graph.GetDrive(f.auth)
if err != nil {
return fuse.EREMOTEIO
}
if drive.DriveType == graph.DriveTypePersonal {
ctx.Warn().Msg("Personal OneDrive accounts do not show number of files, " +
"inode counts reported by onedriver will be bogus.")
} else if drive.Quota.Total == 0 { // <-- check in case Microsoft ever fixes their API
ctx.Warn().Msg("OneDrive for Business accounts do not report quotas, " +
"pretending the quota is 5TB and it's all unused.")
drive.Quota.Total = 5 * uint64(math.Pow(1024, 4))
drive.Quota.Remaining = 5 * uint64(math.Pow(1024, 4))
drive.Quota.FileCount = 0
}
// limits are pasted from https://support.microsoft.com/en-us/help/3125202
const blkSize uint64 = 4096 // default ext4 block size
out.Bsize = uint32(blkSize)
out.Blocks = drive.Quota.Total / blkSize
out.Bfree = drive.Quota.Remaining / blkSize
out.Bavail = drive.Quota.Remaining / blkSize
out.Files = 100000
out.Ffree = 100000 - drive.Quota.FileCount
out.NameLen = 260
return fuse.OK
}
// Mkdir creates a directory.
func (f *Filesystem) Mkdir(cancel <-chan struct{}, in *fuse.MkdirIn, name string, out *fuse.EntryOut) fuse.Status {
if isNameRestricted(name) {
return fuse.EINVAL
}
inode := f.GetNodeID(in.NodeId)
if inode == nil {
return fuse.ENOENT
}
id := inode.ID()
path := filepath.Join(inode.Path(), name)
ctx := log.With().
Str("op", "Mkdir").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("path", path).
Str("mode", Octal(in.Mode)).
Logger()
ctx.Debug().Msg("")
// create the new directory on the server
item, err := graph.Mkdir(name, id, f.auth)
if err != nil {
ctx.Error().Err(err).Msg("Could not create remote directory!")
return fuse.EREMOTEIO
}
newInode := NewInodeDriveItem(item)
newInode.mode = in.Mode | fuse.S_IFDIR
out.NodeId = f.InsertChild(id, newInode)
out.Attr = newInode.makeAttr()
out.SetAttrTimeout(timeout)
out.SetEntryTimeout(timeout)
return fuse.OK
}
// Rmdir removes a directory if it's empty.
func (f *Filesystem) Rmdir(cancel <-chan struct{}, in *fuse.InHeader, name string) fuse.Status {
parentID := f.TranslateID(in.NodeId)
if parentID == "" {
return fuse.ENOENT
}
child, _ := f.GetChild(parentID, name, f.auth)
if child == nil {
return fuse.ENOENT
}
if child.HasChildren() {
return fuse.Status(syscall.ENOTEMPTY)
}
return f.Unlink(cancel, in, name)
}
// OpenDir provides a list of all the entries in the directory
func (f *Filesystem) OpenDir(cancel <-chan struct{}, in *fuse.OpenIn, out *fuse.OpenOut) fuse.Status {
id := f.TranslateID(in.NodeId)
dir := f.GetID(id)
if dir == nil {
return fuse.ENOENT
}
if !dir.IsDir() {
return fuse.ENOTDIR
}
path := dir.Path()
ctx := log.With().
Str("op", "OpenDir").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("path", path).Logger()
ctx.Debug().Msg("")
children, err := f.GetChildrenID(id, f.auth)
if err != nil {
// this is not an item-not-found error (Lookup/Getattr will always be called
// before Readdir()), so something has happened to our connection
ctx.Error().Err(err).Msg("Could not fetch children")
return fuse.EREMOTEIO
}
parent := f.GetID(dir.ParentID())
if parent == nil {
// This is the parent of the mountpoint. The FUSE kernel module discards
// this info, so what we put here doesn't actually matter.
parent = NewInode("..", 0755|fuse.S_IFDIR, nil)
parent.nodeID = math.MaxUint64
}
entries := make([]*Inode, 2)
entries[0] = dir
entries[1] = parent
for _, child := range children {
entries = append(entries, child)
}
f.opendirsM.Lock()
f.opendirs[in.NodeId] = entries
f.opendirsM.Unlock()
return fuse.OK
}
// ReleaseDir closes a directory and purges it from memory
func (f *Filesystem) ReleaseDir(in *fuse.ReleaseIn) {
f.opendirsM.Lock()
delete(f.opendirs, in.NodeId)
f.opendirsM.Unlock()
}
// ReadDirPlus reads an individual directory entry AND does a lookup.
func (f *Filesystem) ReadDirPlus(cancel <-chan struct{}, in *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status {
f.opendirsM.RLock()
entries, ok := f.opendirs[in.NodeId]
f.opendirsM.RUnlock()
if !ok {
// readdir can sometimes arrive before the corresponding opendir, so we force it
f.OpenDir(cancel, &fuse.OpenIn{InHeader: in.InHeader}, nil)
f.opendirsM.RLock()
entries, ok = f.opendirs[in.NodeId]
f.opendirsM.RUnlock()
if !ok {
return fuse.EBADF
}
}
if in.Offset >= uint64(len(entries)) {
// just tried to seek past end of directory, we're all done!
return fuse.OK
}
inode := entries[in.Offset]
entry := fuse.DirEntry{
Ino: inode.NodeID(),
Mode: inode.Mode(),
}
// first two entries will always be "." and ".."
switch in.Offset {
case 0:
entry.Name = "."
case 1:
entry.Name = ".."
default:
entry.Name = inode.Name()
}
entryOut := out.AddDirLookupEntry(entry)
if entryOut == nil {
//FIXME probably need to handle this better using the "overflow stuff"
log.Error().
Str("op", "ReadDirPlus").
Uint64("nodeID", in.NodeId).
Uint64("offset", in.Offset).
Str("entryName", entry.Name).
Uint64("entryNodeID", entry.Ino).
Msg("Exceeded DirLookupEntry bounds!")
return fuse.EIO
}
entryOut.NodeId = entry.Ino
entryOut.Attr = inode.makeAttr()
entryOut.SetAttrTimeout(timeout)
entryOut.SetEntryTimeout(timeout)
return fuse.OK
}
// ReadDir reads a directory entry. Usually doesn't get called (ReadDirPlus is
// typically used).
func (f *Filesystem) ReadDir(cancel <-chan struct{}, in *fuse.ReadIn, out *fuse.DirEntryList) fuse.Status {
f.opendirsM.RLock()
entries, ok := f.opendirs[in.NodeId]
f.opendirsM.RUnlock()
if !ok {
// readdir can sometimes arrive before the corresponding opendir, so we force it
f.OpenDir(cancel, &fuse.OpenIn{InHeader: in.InHeader}, nil)
f.opendirsM.RLock()
entries, ok = f.opendirs[in.NodeId]
f.opendirsM.RUnlock()
if !ok {
return fuse.EBADF
}
}
if in.Offset >= uint64(len(entries)) {
// just tried to seek past end of directory, we're all done!
return fuse.OK
}
inode := entries[in.Offset]
entry := fuse.DirEntry{
Ino: inode.NodeID(),
Mode: inode.Mode(),
}
// first two entries will always be "." and ".."
switch in.Offset {
case 0:
entry.Name = "."
case 1:
entry.Name = ".."
default:
entry.Name = inode.Name()
}
out.AddDirEntry(entry)
return fuse.OK
}
// Lookup is called by the kernel when the VFS wants to know about a file inside
// a directory.
func (f *Filesystem) Lookup(cancel <-chan struct{}, in *fuse.InHeader, name string, out *fuse.EntryOut) fuse.Status {
id := f.TranslateID(in.NodeId)
log.Trace().
Str("op", "Lookup").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("name", name).
Msg("")
child, _ := f.GetChild(id, strings.ToLower(name), f.auth)
if child == nil {
return fuse.ENOENT
}
out.NodeId = child.NodeID()
out.Attr = child.makeAttr()
out.SetAttrTimeout(timeout)
out.SetEntryTimeout(timeout)
return fuse.OK
}
// Mknod creates a regular file. The server doesn't have this yet.
func (f *Filesystem) Mknod(cancel <-chan struct{}, in *fuse.MknodIn, name string, out *fuse.EntryOut) fuse.Status {
if isNameRestricted(name) {
return fuse.EINVAL
}
parentID := f.TranslateID(in.NodeId)
if parentID == "" {
return fuse.EBADF
}
parent := f.GetID(parentID)
if parent == nil {
return fuse.ENOENT
}
path := filepath.Join(parent.Path(), name)
ctx := log.With().
Str("op", "Mknod").
Uint64("nodeID", in.NodeId).
Str("path", path).
Logger()
if f.IsOffline() {
ctx.Warn().Msg("We are offline. Refusing Mknod() to avoid data loss later.")
return fuse.EROFS
}
if child, _ := f.GetChild(parentID, name, f.auth); child != nil {
return fuse.Status(syscall.EEXIST)
}
inode := NewInode(name, in.Mode, parent)
ctx.Debug().
Str("childID", inode.ID()).
Str("mode", Octal(in.Mode)).
Msg("Creating inode.")
out.NodeId = f.InsertChild(parentID, inode)
out.Attr = inode.makeAttr()
out.SetAttrTimeout(timeout)
out.SetEntryTimeout(timeout)
return fuse.OK
}
// Create creates a regular file and opens it. The server doesn't have this yet.
func (f *Filesystem) Create(cancel <-chan struct{}, in *fuse.CreateIn, name string, out *fuse.CreateOut) fuse.Status {
// we reuse mknod here
result := f.Mknod(
cancel,
// we don't actually use the umask or padding here, so they don't get passed
&fuse.MknodIn{
InHeader: in.InHeader,
Mode: in.Mode,
},
name,
&out.EntryOut,
)
if result == fuse.Status(syscall.EEXIST) {
// if the inode already exists, we should truncate the existing file and
// return the existing file inode as per "man creat"
parentID := f.TranslateID(in.NodeId)
child, _ := f.GetChild(parentID, name, f.auth)
log.Debug().
Str("op", "Create").
Uint64("nodeID", in.NodeId).
Str("id", parentID).
Str("childID", child.ID()).
Str("path", child.Path()).
Str("mode", Octal(in.Mode)).
Msg("Child inode already exists, truncating.")
f.content.Delete(child.ID())
f.content.Open(child.ID())
child.DriveItem.Size = 0
child.hasChanges = true
return fuse.OK
}
// no further initialization required to open the file, it's empty
return result
}
// Open fetches an Inode's content and initializes the .Data field with actual
// data from the server.
func (f *Filesystem) Open(cancel <-chan struct{}, in *fuse.OpenIn, out *fuse.OpenOut) fuse.Status {
id := f.TranslateID(in.NodeId)
inode := f.GetID(id)
if inode == nil {
return fuse.ENOENT
}
path := inode.Path()
ctx := log.With().
Str("op", "Open").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("path", path).
Logger()
flags := int(in.Flags)
if flags&os.O_RDWR+flags&os.O_WRONLY > 0 && f.IsOffline() {
ctx.Warn().
Bool("readWrite", flags&os.O_RDWR > 0).
Bool("writeOnly", flags&os.O_WRONLY > 0).
Msg("Refusing Open() with write flag, FS is offline.")
return fuse.EROFS
}
ctx.Debug().Msg("")
// try grabbing from disk
fd, err := f.content.Open(id)
if err != nil {
ctx.Error().Err(err).Msg("Could not create cache file.")
return fuse.EIO
}
if isLocalID(id) {
// just use whatever's present if we're the only ones who have it
return fuse.OK
}
// we have something on disk; verify its content against what we're
// supposed to have
inode.Lock()
defer inode.Unlock()
// stay locked until end to prevent multiple Opens() from competing for
// downloads of the same file.
if inode.VerifyChecksum(graph.QuickXORHashStream(fd)) {
// disk content is only used if the checksums match
ctx.Info().Msg("Found content in cache.")
// we check size ourselves in case the API file sizes are WRONG (it happens)
st, _ := fd.Stat()
inode.DriveItem.Size = uint64(st.Size())
return fuse.OK
}
ctx.Info().Msg(
"Not using cached item due to file hash mismatch, fetching content from API.",
)
// write to tempfile first to ensure our download is good
tempID := "temp-" + id
temp, err := f.content.Open(tempID)
if err != nil {
ctx.Error().Err(err).Msg("Failed to create tempfile for download.")
return fuse.EIO
}
defer f.content.Delete(tempID)
// replace content only on a match
size, err := graph.GetItemContentStream(id, f.auth, temp)
if err != nil || !inode.VerifyChecksum(graph.QuickXORHashStream(temp)) {
ctx.Error().Err(err).Msg("Failed to fetch remote content.")
return fuse.EREMOTEIO
}
temp.Seek(0, 0) // being explicit, even though already done in hashstream func
fd.Seek(0, 0)
fd.Truncate(0)
io.Copy(fd, temp)
inode.DriveItem.Size = size
return fuse.OK
}
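// Illustrative sketch (hypothetical helper, not part of the upstream source):
// the download-verify-replace pattern used by Open() above, reduced to plain
// file operations. fetch stands in for graph.GetItemContentStream and verify
// for the QuickXORHash comparison; dst is only touched once the new content
// has been verified.
func downloadVerifyReplace(dst, tmp *os.File, fetch func(io.Writer) error, verify func(io.ReadSeeker) bool) error {
if err := fetch(tmp); err != nil {
return err
}
if !verify(tmp) {
return syscall.EIO
}
tmp.Seek(0, 0)
dst.Seek(0, 0)
dst.Truncate(0)
_, err := io.Copy(dst, tmp)
return err
}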
// Unlink deletes a child file.
func (f *Filesystem) Unlink(cancel <-chan struct{}, in *fuse.InHeader, name string) fuse.Status {
parentID := f.TranslateID(in.NodeId)
child, _ := f.GetChild(parentID, name, nil)
if child == nil {
// the file we are unlinking never existed
return fuse.ENOENT
}
if f.IsOffline() {
return fuse.EROFS
}
id := child.ID()
path := child.Path()
ctx := log.With().
Str("op", "Unlink").
Uint64("nodeID", in.NodeId).
Str("id", parentID).
Str("childID", id).
Str("path", path).
Logger()
ctx.Debug().Msg("Unlinking inode.")
// if no ID, the item is local-only, and does not need to be deleted on the
// server
if !isLocalID(id) {
if err := graph.Remove(id, f.auth); err != nil {
ctx.Err(err).Msg("Failed to delete item on server. Aborting op.")
return fuse.EREMOTEIO
}
}
f.DeleteID(id)
f.content.Delete(id)
return fuse.OK
}
// Read an inode's data like a file.
func (f *Filesystem) Read(cancel <-chan struct{}, in *fuse.ReadIn, buf []byte) (fuse.ReadResult, fuse.Status) {
inode := f.GetNodeID(in.NodeId)
if inode == nil {
return fuse.ReadResultData(make([]byte, 0)), fuse.EBADF
}
id := inode.ID()
path := inode.Path()
ctx := log.With().
Str("op", "Read").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("path", path).
Int("bufsize", len(buf)).
Logger()
ctx.Trace().Msg("")
fd, err := f.content.Open(id)
if err != nil {
ctx.Error().Err(err).Msg("Cache Open() failed.")
return fuse.ReadResultData(make([]byte, 0)), fuse.EIO
}
// we are locked for the remainder of this op
inode.RLock()
defer inode.RUnlock()
return fuse.ReadResultFd(fd.Fd(), int64(in.Offset), int(in.Size)), fuse.OK
}
// Write to an Inode like a file. Note that changes are 100% local until
// Flush() is called. Returns the number of bytes written and the status of the
// op.
func (f *Filesystem) Write(cancel <-chan struct{}, in *fuse.WriteIn, data []byte) (uint32, fuse.Status) {
id := f.TranslateID(in.NodeId)
inode := f.GetID(id)
if inode == nil {
return 0, fuse.EBADF
}
nWrite := len(data)
offset := int(in.Offset)
ctx := log.With().
Str("op", "Write").
Str("id", id).
Uint64("nodeID", in.NodeId).
Str("path", inode.Path()).
Int("bufsize", nWrite).
Int("offset", offset).
Logger()
ctx.Trace().Msg("")
fd, err := f.content.Open(id)
if err != nil {
ctx.Error().Msg("Cache Open() failed.")
return 0, fuse.EIO
}
inode.Lock()
defer inode.Unlock()
n, err := fd.WriteAt(data, int64(offset))
if err != nil {
ctx.Error().Err(err).Msg("Error during write")
return uint32(n), fuse.EIO
}
st, _ := fd.Stat()
inode.DriveItem.Size = uint64(st.Size())
inode.hasChanges = true
return uint32(n), fuse.OK
}
// Fsync is a signal to ensure writes to the Inode are flushed to stable
// storage. This method is used to trigger uploads of file content.
func (f *Filesystem) Fsync(cancel <-chan struct{}, in *fuse.FsyncIn) fuse.Status {
id := f.TranslateID(in.NodeId)
inode := f.GetID(id)
if inode == nil {
return fuse.EBADF
}
ctx := log.With().
Str("op", "Fsync").
Str("id", id).
Uint64("nodeID", in.NodeId).
Str("path", inode.Path()).
Logger()
ctx.Debug().Msg("")
if inode.HasChanges() {
inode.Lock()
inode.hasChanges = false
// recompute hashes when saving new content
inode.DriveItem.File = &graph.File{}
fd, err := f.content.Open(id)
if err != nil {
ctx.Error().Err(err).Msg("Could not get fd.")
}
fd.Sync()
inode.DriveItem.File.Hashes.QuickXorHash = graph.QuickXORHashStream(fd)
inode.Unlock()
if err := f.uploads.QueueUpload(inode); err != nil {
ctx.Error().Err(err).Msg("Error creating upload session.")
return fuse.EREMOTEIO
}
return fuse.OK
}
return fuse.OK
}
// Flush is called when a file descriptor is closed. Uses Fsync() to perform file
// uploads. (Release not implemented because all cleanup is already done here).
func (f *Filesystem) Flush(cancel <-chan struct{}, in *fuse.FlushIn) fuse.Status {
inode := f.GetNodeID(in.NodeId)
if inode == nil {
return fuse.EBADF
}
id := inode.ID()
log.Trace().
Str("op", "Flush").
Str("id", id).
Str("path", inode.Path()).
Uint64("nodeID", in.NodeId).
Msg("")
f.Fsync(cancel, &fuse.FsyncIn{InHeader: in.InHeader})
f.content.Close(id)
return fuse.OK
}
// GetAttr returns the Inode as a UNIX stat. Holds the read mutex for all of
// the "metadata fetch" operations.
func (f *Filesystem) GetAttr(cancel <-chan struct{}, in *fuse.GetAttrIn, out *fuse.AttrOut) fuse.Status {
id := f.TranslateID(in.NodeId)
inode := f.GetID(id)
if inode == nil {
return fuse.ENOENT
}
log.Trace().
Str("op", "GetAttr").
Uint64("nodeID", in.NodeId).
Str("id", id).
Str("path", inode.Path()).
Msg("")
out.Attr = inode.makeAttr()
out.SetTimeout(timeout)
return fuse.OK
}
// SetAttr is the workhorse for setting filesystem attributes. Does the work of
// operations like utimens, chmod, chown (not implemented, FUSE is single-user),
// and truncate.
func (f *Filesystem) SetAttr(cancel <-chan struct{}, in *fuse.SetAttrIn, out *fuse.AttrOut) fuse.Status {
i := f.GetNodeID(in.NodeId)
if i == nil {
return fuse.ENOENT
}
path := i.Path()
isDir := i.IsDir() // holds an rlock
i.Lock()
ctx := log.With().
Str("op", "SetAttr").
Uint64("nodeID", in.NodeId).
Str("id", i.DriveItem.ID).
Str("path", path).
Logger()
// utimens
if mtime, valid := in.GetMTime(); valid {
ctx.Info().
Str("subop", "utimens").
Time("oldMtime", *i.DriveItem.ModTime).
Time("newMtime", *i.DriveItem.ModTime).
Msg("")
i.DriveItem.ModTime = &mtime
}
// chmod
if mode, valid := in.GetMode(); valid {
ctx.Info().
Str("subop", "chmod").
Str("oldMode", Octal(i.mode)).
Str("newMode", Octal(mode)).
Msg("")
if isDir {
i.mode = fuse.S_IFDIR | mode
} else {
i.mode = fuse.S_IFREG | mode
}
}
// truncate
if size, valid := in.GetSize(); valid {
ctx.Info().
Str("subop", "truncate").
Uint64("oldSize", i.DriveItem.Size).
Uint64("newSize", size).
Msg("")
fd, _ := f.content.Open(i.DriveItem.ID)
// the unix syscall does not update the seek position, so neither should we
fd.Truncate(int64(size))
i.DriveItem.Size = size
i.hasChanges = true
}
i.Unlock()
out.Attr = i.makeAttr()
out.SetTimeout(timeout)
return fuse.OK
}
// Rename renames and/or moves an inode.
func (f *Filesystem) Rename(cancel <-chan struct{}, in *fuse.RenameIn, name string, newName string) fuse.Status {
if isNameRestricted(newName) {
return fuse.EINVAL
}
oldParentID := f.TranslateID(in.NodeId)
oldParentItem := f.GetNodeID(in.NodeId)
if oldParentID == "" || oldParentItem == nil {
return fuse.EBADF
}
path := filepath.Join(oldParentItem.Path(), name)
// we will already have the metadata for the dest inode, so there is no need
// to use GetPath() to prefetch it. For the fs to know about this inode at
// all, it must have already fetched all of the inodes up to the new
// destination.
newParentItem := f.GetNodeID(in.Newdir)
if newParentItem == nil {
return fuse.ENOENT
}
dest := filepath.Join(newParentItem.Path(), newName)
inode, _ := f.GetChild(oldParentID, name, f.auth)
id, err := f.remoteID(inode)
newParentID := newParentItem.ID()
ctx := log.With().
Str("op", "Rename").
Str("id", id).
Str("parentID", newParentID).
Str("path", path).
Str("dest", dest).
Logger()
ctx.Info().
Uint64("srcNodeID", in.NodeId).
Uint64("dstNodeID", in.Newdir).
Msg("")
if isLocalID(id) || err != nil {
// uploads will fail without an id
ctx.Error().Err(err).
Msg("ID of item to move cannot be local and we failed to obtain an ID.")
return fuse.EREMOTEIO
}
// perform remote rename
if err = graph.Rename(id, newName, newParentID, f.auth); err != nil {
ctx.Error().Err(err).Msg("Failed to rename remote item.")
return fuse.EREMOTEIO
}
// now rename local copy
if err = f.MovePath(oldParentID, newParentID, name, newName, f.auth); err != nil {
ctx.Error().Err(err).Msg("Failed to rename local item.")
return fuse.EIO
}
// whew! item renamed
return fuse.OK
}
onedriver-0.14.1/fs/fs_test.go 0000664 0000000 0000000 00000041546 14513675524 0016226 0 ustar 00root root 0000000 0000000 // A bunch of "black box" filesystem integration tests that test the
// functionality of key syscalls and their implementation. If something fails
// here, the filesystem is not functional.
package fs
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"testing"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Does Go's internal ReadDir function work? This is mostly here to compare against
// the offline versions of this test.
func TestReaddir(t *testing.T) {
t.Parallel()
files, err := ioutil.ReadDir("mount")
if err != nil {
t.Fatal(err)
}
for _, file := range files {
if file.Name() == "Documents" {
return
}
}
t.Fatal("Could not find \"Documents\" folder.")
}
// does ls work and can we find the Documents folder?
func TestLs(t *testing.T) {
t.Parallel()
stdout, err := exec.Command("ls", "mount").Output()
require.NoError(t, err)
sout := string(stdout)
if !strings.Contains(sout, "Documents") {
t.Fatal("Could not find \"Documents\" folder.")
}
}
// can touch create an empty file?
func TestTouchCreate(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "empty")
syscall.Umask(022) // otherwise tests fail if default umask is 002
require.NoError(t, exec.Command("touch", fname).Run())
st, err := os.Stat(fname)
require.NoError(t, err)
require.Zero(t, st.Size(), "Size should be zero.")
if st.Mode() != 0644 {
t.Fatal("Mode of new file was not 644, got", Octal(uint32(st.Mode())))
}
require.False(t, st.IsDir(), "New file detected as directory.")
}
// does the touch command update modification time properly?
func TestTouchUpdateTime(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "modtime")
require.NoError(t, exec.Command("touch", fname).Run())
st1, _ := os.Stat(fname)
time.Sleep(2 * time.Second)
require.NoError(t, exec.Command("touch", fname).Run())
st2, _ := os.Stat(fname)
if st2.ModTime().Equal(st1.ModTime()) || st2.ModTime().Before(st1.ModTime()) {
t.Fatalf("File modification time was not updated by touch:\n"+
"Before: %d\nAfter: %d\n", st1.ModTime().Unix(), st2.ModTime().Unix())
}
}
// chmod should *just work*
func TestChmod(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "chmod_tester")
require.NoError(t, exec.Command("touch", fname).Run())
require.NoError(t, os.Chmod(fname, 0777))
st, _ := os.Stat(fname)
if st.Mode() != 0777 {
t.Fatalf("Mode of file was not 0777, got %o instead!", st.Mode())
}
}
// test that both mkdir and rmdir work, as well as the potentially failing
// mkdir->rmdir->mkdir chain that fails if the cache hangs on to an old copy
// after rmdir
func TestMkdirRmdir(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "folder1")
require.NoError(t, os.Mkdir(fname, 0755))
require.NoError(t, os.Remove(fname))
require.NoError(t, os.Mkdir(fname, 0755))
}
// We shouldn't be able to rmdir nonempty directories
func TestRmdirNonempty(t *testing.T) {
t.Parallel()
dir := filepath.Join(TestDir, "nonempty")
require.NoError(t, os.Mkdir(dir, 0755))
require.NoError(t, os.Mkdir(filepath.Join(dir, "contents"), 0755))
require.Error(t, os.Remove(dir), "We somehow removed a nonempty directory!")
require.NoError(t, os.RemoveAll(dir),
"Could not remove a nonempty directory the correct way!")
}
// test that we can write to a file and read its contents back correctly
func TestReadWrite(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "write.txt")
content := "my hands are typing words\n"
require.NoError(t, ioutil.WriteFile(fname, []byte(content), 0644))
read, err := ioutil.ReadFile(fname)
require.NoError(t, err)
assert.Equal(t, content, string(read), "File content was not correct.")
}
// ld can crash the filesystem because it starts writing output at byte 64 in a
// previously empty file
func TestWriteOffset(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "main.c")
require.NoError(t, ioutil.WriteFile(fname,
[]byte(`#include <stdio.h>
int main(int argc, char **argv) {
printf("ld writes files in a funny manner!");
}`), 0644))
require.NoError(t, exec.Command("gcc", "-o", filepath.Join(TestDir, "main.o"), fname).Run())
}
// test that we can create a file and rename it
// TODO this can fail if a server-side rename undoes the second local rename
func TestRenameMove(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "rename.txt")
dname := filepath.Join(TestDir, "new-destination-name.txt")
require.NoError(t, ioutil.WriteFile(fname, []byte("hopefully renames work\n"), 0644))
require.NoError(t, os.Rename(fname, dname))
st, err := os.Stat(dname)
require.NoError(t, err)
require.NotNil(t, st, "Renamed file does not exist.")
os.Mkdir(filepath.Join(TestDir, "dest"), 0755)
dname2 := filepath.Join(TestDir, "dest/even-newer-name.txt")
require.NoError(t, os.Rename(dname, dname2))
st, err = os.Stat(dname2)
require.NoError(t, err)
require.NotNil(t, st, "Renamed file does not exist.")
}
// test that copies work as expected
func TestCopy(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "copy-start.txt")
dname := filepath.Join(TestDir, "copy-end.txt")
content := "and copies too!\n"
require.NoError(t, ioutil.WriteFile(fname, []byte(content), 0644))
require.NoError(t, exec.Command("cp", fname, dname).Run())
read, err := ioutil.ReadFile(fname)
require.NoError(t, err)
assert.Equal(t, content, string(read), "File content was not correct.")
}
// do appends work correctly?
func TestAppend(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "append.txt")
for i := 0; i < 5; i++ {
file, _ := os.OpenFile(fname, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
file.WriteString("append\n")
file.Close()
}
file, err := os.Open(fname)
require.NoError(t, err)
defer file.Close()
scanner := bufio.NewScanner(file)
var counter int
for scanner.Scan() {
counter++
scanned := scanner.Text()
if scanned != "append" {
t.Fatalf("File text was wrong. Got \"%s\", wanted \"append\"\n", scanned)
}
}
if counter != 5 {
t.Fatalf("Got wrong number of lines (%d), expected 5\n", counter)
}
}
// identical to TestAppend, but truncates the file each time it is written to
func TestTruncate(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "truncate.txt")
for i := 0; i < 5; i++ {
file, _ := os.OpenFile(fname, os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644)
file.WriteString("append\n")
file.Close()
}
file, err := os.Open(fname)
require.NoError(t, err)
defer file.Close()
scanner := bufio.NewScanner(file)
var counter int
for scanner.Scan() {
counter++
assert.Equal(t, "append", scanner.Text(), "File text was wrong.")
}
if counter != 1 {
t.Fatalf("Got wrong number of lines (%d), expected 1\n", counter)
}
}
// can we seek to the middle of a file and do writes there correctly?
func TestReadWriteMidfile(t *testing.T) {
t.Parallel()
content := `Lorem ipsum dolor sit amet, consectetur adipiscing elit.
Phasellus viverra dui vel velit eleifend, vel auctor nulla scelerisque.
Mauris volutpat a justo vel suscipit. Suspendisse diam lorem, imperdiet eget
fermentum ut, sodales a nunc. Phasellus eget mattis purus. Aenean vitae justo
condimentum, rutrum libero non, commodo ex. Nullam mi metus, accumsan sit
amet varius non, volutpat eget mi. Fusce sollicitudin arcu eget ipsum
gravida, ut blandit turpis facilisis. Quisque vel rhoncus nulla, ultrices
tempor turpis. Nullam urna leo, dapibus eu velit eu, venenatis aliquet
tortor. In tempus lacinia est, nec gravida ipsum viverra sed. In vel felis
vitae odio pulvinar egestas. Sed ullamcorper, nulla non molestie dictum,
massa lectus mattis dolor, in volutpat nulla lectus id neque.`
fname := filepath.Join(TestDir, "midfile.txt")
require.NoError(t, ioutil.WriteFile(fname, []byte(content), 0644))
file, _ := os.OpenFile(fname, os.O_RDWR, 0644)
defer file.Close()
match := "my hands are typing words. aaaaaaa"
n, err := file.WriteAt([]byte(match), 123)
require.NoError(t, err)
require.Equal(t, len(match), n, "Wrong number of bytes written.")
result := make([]byte, len(match))
n, err = file.ReadAt(result, 123)
require.NoError(t, err)
require.Equal(t, len(match), n, "Wrong number of bytes read.")
require.Equal(t, match, string(result), "Content did not match expected output.")
}
// Statfs should succeed
func TestStatFs(t *testing.T) {
t.Parallel()
var st syscall.Statfs_t
err := syscall.Statfs(TestDir, &st)
require.NoError(t, err)
require.NotZero(t, st.Blocks, "StatFs failed, got 0 blocks!")
}
// does unlink work? (because apparently we weren't testing that before...)
func TestUnlink(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "unlink_tester")
require.NoError(t, exec.Command("touch", fname).Run())
require.NoError(t, os.Remove(fname))
stdout, _ := exec.Command("ls", "mount").Output()
if strings.Contains(string(stdout), "unlink_tester") {
t.Fatalf("Deleting %s did not work.", fname)
}
}
// OneDrive is case-insensitive due to limitations imposed by the Windows NTFS
// filesystem. Make sure we prevent users of normal systems from running into
// issues with OneDrive's case-insensitivity.
func TestNTFSIsABadFilesystem(t *testing.T) {
t.Parallel()
require.NoError(t, ioutil.WriteFile(filepath.Join(TestDir, "case-sensitive.txt"),
[]byte("NTFS is bad"), 0644))
require.NoError(t, ioutil.WriteFile(filepath.Join(TestDir, "CASE-SENSITIVE.txt"),
[]byte("yep"), 0644))
content, err := ioutil.ReadFile(filepath.Join(TestDir, "Case-Sensitive.TXT"))
require.NoError(t, err)
require.Equal(t, "yep", string(content), "Did not find expected output.")
}
// same as last test, but with exclusive create() calls.
func TestNTFSIsABadFilesystem2(t *testing.T) {
t.Parallel()
file, err := os.OpenFile(filepath.Join(TestDir, "case-sensitive2.txt"), os.O_CREATE|os.O_EXCL, 0644)
file.Close()
require.NoError(t, err)
file, err = os.OpenFile(filepath.Join(TestDir, "CASE-SENSITIVE2.txt"), os.O_CREATE|os.O_EXCL, 0644)
file.Close()
require.Error(t, err,
"We should be throwing an error, since OneDrive is case-insensitive.")
}
// Ensure that case-sensitivity collisions due to renames are handled properly
// (allow rename/overwrite for exact matches, deny when case-sensitivity would
// normally allow success)
func TestNTFSIsABadFilesystem3(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "original_NAME.txt")
ioutil.WriteFile(fname, []byte("original"), 0644)
// should work
secondName := filepath.Join(TestDir, "new_name.txt")
require.NoError(t, ioutil.WriteFile(secondName, []byte("new"), 0644))
require.NoError(t, os.Rename(secondName, fname))
contents, err := ioutil.ReadFile(fname)
require.NoError(t, err)
require.Equal(t, "new", string(contents), "Contents did not match expected output.")
// should fail
thirdName := filepath.Join(TestDir, "new_name2.txt")
require.NoError(t, ioutil.WriteFile(thirdName, []byte("this rename should fail"), 0644))
err = os.Rename(thirdName, filepath.Join(TestDir, "original_name.txt"))
require.Error(t, err, "Rename succeeded, but it should have failed.")
_, err = os.Stat(fname)
require.NoErrorf(t, err, "\"%s\" does not exist after the rename.", fname)
}
// This test is insurance to prevent tests (and the fs) from accidentally not
// storing case for filenames at all
func TestChildrenAreCasedProperly(t *testing.T) {
t.Parallel()
require.NoError(t, ioutil.WriteFile(
filepath.Join(TestDir, "CASE-check.txt"), []byte("yep"), 0644))
stdout, err := exec.Command("ls", TestDir).Output()
if err != nil {
t.Fatalf("%s: %s", err, stdout)
}
if !strings.Contains(string(stdout), "CASE-check.txt") {
t.Fatalf("Upper case filenames were not honored, "+
"expected \"CASE-check.txt\" in output, got %s\n", string(stdout))
}
}
// Test that when running "echo some text > file.txt" that file.txt actually
// becomes populated
func TestEchoWritesToFile(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "bagels")
out, err := exec.Command("bash", "-c", "echo bagels > "+fname).CombinedOutput()
require.NoError(t, err, out)
content, err := ioutil.ReadFile(fname)
require.NoError(t, err)
if !bytes.Contains(content, []byte("bagels")) {
t.Fatalf("Populating a file via 'echo' failed. Got: \"%s\", wanted \"bagels\"\n", content)
}
}
// Test that if we stat a file, we get some correct information back
func TestStat(t *testing.T) {
t.Parallel()
stat, err := os.Stat("mount/Documents")
require.NoError(t, err)
require.Equal(t, "Documents", stat.Name(), "Name was not \"Documents\".")
if stat.ModTime().Year() < 1971 {
t.Fatal("Modification time of /Documents wrong, got: " + stat.ModTime().String())
}
if !stat.IsDir() {
t.Fatalf("Mode of /Documents wrong, not detected as directory, got: %s", stat.Mode())
}
}
// Question marks appear in `ls -l`'s output if an item is populated via readdir,
// but subsequently not found by lookup. Also is a nice catch-all for fs
// metadata corruption, as `ls` will exit with 1 if something bad happens.
func TestNoQuestionMarks(t *testing.T) {
t.Parallel()
out, err := exec.Command("ls", "-l", "mount/").CombinedOutput()
if strings.Contains(string(out), "??????????") || err != nil {
t.Log("A Lookup() failed on an inode found by Readdir()")
t.Log(string(out))
t.FailNow()
}
}
// Trashing items through nautilus or other Linux file managers is done via
// "gio trash". Make an item then trash it to verify that this works.
func TestGIOTrash(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "trash_me.txt")
require.NoError(t, ioutil.WriteFile(fname, []byte("i should be trashed"), 0644))
out, err := exec.Command("gio", "trash", fname).CombinedOutput()
if err != nil {
t.Log(string(out))
t.Log(err)
if st, err2 := os.Stat(fname); err2 == nil {
if !st.IsDir() && strings.Contains(string(out), "Is a directory") {
t.Skip("This is a GIO bug (it complains about test file being " +
"a directory despite correct metadata from onedriver), skipping.")
}
t.Fatal(fname, "still exists after deletion!")
}
}
if strings.Contains(string(out), "Unable to find or create trash directory") {
t.Fatal(string(out))
}
}
// Test that we are able to work around onedrive paging limits when
// listing a folder's children.
func TestListChildrenPaging(t *testing.T) {
t.Parallel()
// files have been prepopulated during test setup to avoid being picked up by
// the delta thread
items, err := graph.GetItemChildrenPath("/onedriver_tests/paging", auth)
require.NoError(t, err)
files, err := ioutil.ReadDir(filepath.Join(TestDir, "paging"))
require.NoError(t, err)
if len(files) < 201 {
if len(items) < 201 {
t.Logf("Skipping test, number of paging files from the API were also less than 201.\nAPI: %d\nFS: %d\n",
len(items), len(files),
)
t.SkipNow()
}
t.Fatalf("Paging limit failed. Got %d files, wanted at least 201.\n", len(files))
}
}
// Libreoffice writes to files in a funny manner and it can result in a 0 byte file
// being uploaded (can check syscalls via "inotifywait -m -r .").
func TestLibreOfficeSavePattern(t *testing.T) {
t.Parallel()
content := []byte("This will break things.")
fname := filepath.Join(TestDir, "libreoffice.txt")
require.NoError(t, ioutil.WriteFile(fname, content, 0644))
out, err := exec.Command(
"libreoffice",
"--headless",
"--convert-to", "docx",
"--outdir", TestDir,
fname,
).CombinedOutput()
require.NoError(t, err, out)
// libreoffice document conversion can fail with an exit code of 0,
// so we need to actually check the command output
require.NotContains(t, string(out), "Error:")
assert.Eventually(t, func() bool {
item, err := graph.GetItemPath("/onedriver_tests/libreoffice.docx", auth)
if err == nil && item != nil {
if item.Size == 0 {
t.Fatal("Item size was 0!")
}
return true
}
return false
}, retrySeconds, 3*time.Second,
"Could not find /onedriver_tests/libreoffice.docx post-upload!",
)
}
// TestDisallowedFilenames verifies that we can't create any of the disallowed filenames
// https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa
func TestDisallowedFilenames(t *testing.T) {
t.Parallel()
contents := []byte("this should not work")
assert.Error(t, os.WriteFile(filepath.Join(TestDir, "disallowed: filename.txt"), contents, 0644))
assert.Error(t, os.WriteFile(filepath.Join(TestDir, "disallowed_vti_text.txt"), contents, 0644))
assert.Error(t, os.WriteFile(filepath.Join(TestDir, "disallowed_<_text.txt"), contents, 0644))
assert.Error(t, os.WriteFile(filepath.Join(TestDir, "COM0"), contents, 0644))
assert.Error(t, os.Mkdir(filepath.Join(TestDir, "disallowed:folder"), 0755))
assert.Error(t, os.Mkdir(filepath.Join(TestDir, "disallowed_vti_folder"), 0755))
assert.Error(t, os.Mkdir(filepath.Join(TestDir, "disallowed>folder"), 0755))
assert.Error(t, os.Mkdir(filepath.Join(TestDir, "desktop.ini"), 0755))
require.NoError(t, os.Mkdir(filepath.Join(TestDir, "valid-directory"), 0755))
assert.Error(t, os.Rename(
filepath.Join(TestDir, "valid-directory"),
filepath.Join(TestDir, "invalid_vti_directory"),
))
}
onedriver-0.14.1/fs/graph/ 0000775 0000000 0000000 00000000000 14513675524 0015317 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/fs/graph/drive_item.go 0000664 0000000 0000000 00000020454 14513675524 0020002 0 ustar 00root root 0000000 0000000 package graph
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/url"
"strings"
"time"
"github.com/rs/zerolog/log"
)
// DriveTypePersonal and friends represent the possible different values for a
// drive's type when fetched from the API.
const (
DriveTypePersonal = "personal"
DriveTypeBusiness = "business"
DriveTypeSharepoint = "documentLibrary"
)
// DriveItemParent describes a DriveItem's parent in the Graph API
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/itemreference
type DriveItemParent struct {
//TODO Path is technically available, but we shouldn't use it
Path string `json:"path,omitempty"`
ID string `json:"id,omitempty"`
DriveID string `json:"driveId,omitempty"`
DriveType string `json:"driveType,omitempty"` // personal | business | documentLibrary
}
// Folder is used for parsing only
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/folder
type Folder struct {
ChildCount uint32 `json:"childCount,omitempty"`
}
// Hashes are integrity hashes used to determine if file content has changed.
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/hashes
type Hashes struct {
SHA1Hash string `json:"sha1Hash,omitempty"`
QuickXorHash string `json:"quickXorHash,omitempty"`
}
// File is used for checking for changes in local files (relative to the server).
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/file
type File struct {
Hashes Hashes `json:"hashes,omitempty"`
}
// Deleted is used for detecting when items get deleted on the server
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/deleted
type Deleted struct {
State string `json:"state,omitempty"`
}
// DriveItem contains the data fields from the Graph API
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/driveitem
type DriveItem struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Size uint64 `json:"size,omitempty"`
ModTime *time.Time `json:"lastModifiedDatetime,omitempty"`
Parent *DriveItemParent `json:"parentReference,omitempty"`
Folder *Folder `json:"folder,omitempty"`
File *File `json:"file,omitempty"`
Deleted *Deleted `json:"deleted,omitempty"`
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior,omitempty"`
ETag string `json:"eTag,omitempty"`
}
// IsDir returns true if the DriveItem represents a directory
func (d *DriveItem) IsDir() bool {
return d.Folder != nil
}
// ModTimeUnix returns the modification time as a unix uint64 time
func (d *DriveItem) ModTimeUnix() uint64 {
return uint64(d.ModTime.Unix())
}
// getItem is the internal method used to lookup items
func getItem(path string, auth *Auth) (*DriveItem, error) {
body, err := Get(path, auth)
if err != nil {
return nil, err
}
item := &DriveItem{}
err = json.Unmarshal(body, item)
if err != nil && bytes.Contains(body, []byte("\"size\":-")) {
// onedrive for business directories can sometimes have negative sizes,
// ignore this error
err = nil
}
return item, err
}
// GetItem fetches a DriveItem by ID. ID can also be "root" for the root item.
func GetItem(id string, auth *Auth) (*DriveItem, error) {
return getItem(IDPath(id), auth)
}
// GetItemChild fetches the named child of an item.
func GetItemChild(id string, name string, auth *Auth) (*DriveItem, error) {
return getItem(
fmt.Sprintf("%s:/%s", IDPath(id), url.PathEscape(name)),
auth,
)
}
// GetItemPath fetches a DriveItem by path. Only used in special cases, like for the
// root item.
func GetItemPath(path string, auth *Auth) (*DriveItem, error) {
return getItem(ResourcePath(path), auth)
}
// GetItemContent retrieves an item's content from the Graph endpoint.
func GetItemContent(id string, auth *Auth) ([]byte, uint64, error) {
buf := bytes.NewBuffer(make([]byte, 0))
n, err := GetItemContentStream(id, auth, buf)
return buf.Bytes(), uint64(n), err
}
// GetItemContentStream is the same as GetItemContent, but writes data to an
// output writer. This function assumes a brand-new io.Writer is used, so
// "output" must be truncated if there is content already in the io.Writer
// prior to use.
func GetItemContentStream(id string, auth *Auth, output io.Writer) (uint64, error) {
// determine the size of the item
item, err := GetItem(id, auth)
if err != nil {
return 0, err
}
const downloadChunkSize = 10 * 1024 * 1024
downloadURL := fmt.Sprintf("/me/drive/items/%s/content", id)
if item.Size <= downloadChunkSize {
// simple one-shot download
content, err := Get(downloadURL, auth)
if err != nil {
return 0, err
}
n, err := output.Write(content)
return uint64(n), err
}
// multipart download
var n uint64
for i := 0; i < int(item.Size/downloadChunkSize)+1; i++ {
start := i * downloadChunkSize
end := start + downloadChunkSize - 1
log.Info().
Str("id", item.ID).
Str("name", item.Name).
Msgf("Downloading bytes %d-%d/%d.", start, end, item.Size)
content, err := Get(downloadURL, auth, Header{
key: "Range",
value: fmt.Sprintf("bytes=%d-%d", start, end),
})
if err != nil {
return n, err
}
written, err := output.Write(content)
n += uint64(written)
if err != nil {
return n, err
}
}
log.Info().
Str("id", item.ID).
Str("name", item.Name).
Uint64("size", n).
Msgf("Download completed!")
return n, nil
}
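// Illustrative sketch (hypothetical helper, not part of the upstream source):
// how the loop above maps chunk indices to HTTP Range headers. Range ends are
// inclusive and may overshoot the file size; the server clamps the final
// chunk.
func exampleRangeHeaders(size, chunk uint64) []string {
headers := make([]string, 0)
for start := uint64(0); start < size; start += chunk {
headers = append(headers, fmt.Sprintf("bytes=%d-%d", start, start+chunk-1))
}
return headers
}
// For a 25 MiB file with the 10 MiB chunk size used above, this yields:
// bytes=0-10485759, bytes=10485760-20971519, bytes=20971520-31457279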
// Remove removes a directory or file by ID
func Remove(id string, auth *Auth) error {
return Delete("/me/drive/items/"+id, auth)
}
// Mkdir creates a directory on the server at the specified parent ID.
func Mkdir(name string, parentID string, auth *Auth) (*DriveItem, error) {
// create a new folder on the server
newFolderPost := DriveItem{
Name: name,
Folder: &Folder{},
}
bytePayload, _ := json.Marshal(newFolderPost)
resp, err := Post(childrenPathID(parentID), auth, bytes.NewReader(bytePayload))
if err != nil {
return nil, err
}
err = json.Unmarshal(resp, &newFolderPost)
return &newFolderPost, err
}
// Rename moves and/or renames an item on the server. The itemName and parentID
// arguments correspond to the *new* basename or id of the parent.
func Rename(itemID string, itemName string, parentID string, auth *Auth) error {
// start creating patch content for server
// mutex does not need to be initialized since it is never used locally
patchContent := DriveItem{
ConflictBehavior: "replace", // overwrite existing content at new location
Name: itemName,
Parent: &DriveItemParent{
ID: parentID,
},
}
// apply patch to server copy - note that we don't actually care about the
// response content, only if it returns an error
jsonPatch, _ := json.Marshal(patchContent)
_, err := Patch("/me/drive/items/"+itemID, auth, bytes.NewReader(jsonPatch))
if err != nil && strings.Contains(err.Error(), "resourceModified") {
// Wait a second, then retry the request. The Onedrive servers sometimes
// aren't quick enough here if the object has been recently created
// (<1 second ago).
time.Sleep(time.Second)
_, err = Patch("/me/drive/items/"+itemID, auth, bytes.NewReader(jsonPatch))
}
return err
}
// only used for parsing
type driveChildren struct {
Children []*DriveItem `json:"value"`
NextLink string `json:"@odata.nextLink"`
}
// this is the internal method that actually fetches an item's children
func getItemChildren(pollURL string, auth *Auth) ([]*DriveItem, error) {
fetched := make([]*DriveItem, 0)
for pollURL != "" {
body, err := Get(pollURL, auth)
if err != nil {
return fetched, err
}
var pollResult driveChildren
json.Unmarshal(body, &pollResult)
// there can be multiple pages of 200 items each (default).
// continue to the next iteration if we have an @odata.nextLink value
fetched = append(fetched, pollResult.Children...)
pollURL = strings.TrimPrefix(pollResult.NextLink, GraphURL)
}
return fetched, nil
}
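// Illustrative paging example (hypothetical values): a response like
//
//   {"value": [{"id": "A"}, {"id": "B"}],
//    "@odata.nextLink": "https://graph.microsoft.com/v1.0/me/drive/items/X/children?$skiptoken=..."}
//
// decodes into driveChildren; after TrimPrefix with GraphURL, pollURL becomes
// "/me/drive/items/X/children?$skiptoken=..." and the loop above fetches the
// next page. An empty NextLink ends the loop.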
// GetItemChildren fetches all children of an item denoted by ID.
func GetItemChildren(id string, auth *Auth) ([]*DriveItem, error) {
return getItemChildren(childrenPathID(id), auth)
}
// GetItemChildrenPath fetches all children of an item denoted by path.
func GetItemChildrenPath(path string, auth *Auth) ([]*DriveItem, error) {
return getItemChildren(childrenPath(path), auth)
}
onedriver-0.14.1/fs/graph/drive_item_test.go 0000664 0000000 0000000 00000000664 14513675524 0021042 0 ustar 00root root 0000000 0000000 package graph
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestGetItem(t *testing.T) {
t.Parallel()
var auth Auth
auth.FromFile(".auth_tokens.json")
item, err := GetItemPath("/", &auth)
assert.NoError(t, err)
assert.Equal(t, "root", item.Name, "Failed to fetch directory root.")
_, err = GetItemPath("/lkjfsdlfjdwjkfl", &auth)
assert.Error(t, err, "We didn't return an error for a non-existent item!")
}
onedriver-0.14.1/fs/graph/graph.go 0000664 0000000 0000000 00000015073 14513675524 0016755 0 ustar 00root root 0000000 0000000 // Package graph provides the basic APIs to interact with Microsoft Graph. This includes
// the DriveItem resource and supporting resources which are the basis of working with
// files and folders through the Microsoft Graph API.
package graph
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"time"
"github.com/imdario/mergo"
"github.com/rs/zerolog/log"
)
// GraphURL is the API endpoint of Microsoft Graph
const GraphURL = "https://graph.microsoft.com/v1.0"
// graphError is an internal struct used when decoding Graph's error messages
type graphError struct {
Error struct {
Code string `json:"code"`
Message string `json:"message"`
} `json:"error"`
}
// Header is an additional HTTP header that can be passed to Request
type Header struct {
key, value string
}
// Request performs an authenticated request to Microsoft Graph
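//
// A minimal sketch of direct usage (most callers use the Get/Patch/Post/Put
// wrappers below):
//
//	body, err := Request("/me/drive/root", auth, "GET", nil)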
func Request(resource string, auth *Auth, method string, content io.Reader, headers ...Header) ([]byte, error) {
if auth == nil || auth.AccessToken == "" {
// a catch all condition to avoid wiping our auth by accident
log.Error().Msg("Auth was empty and we attempted to make a request with it!")
return nil, errors.New("cannot make a request with empty auth")
}
auth.Refresh()
client := &http.Client{Timeout: 60 * time.Second}
request, _ := http.NewRequest(method, GraphURL+resource, content)
request.Header.Add("Authorization", "bearer "+auth.AccessToken)
switch method { // request type-specific code here
case "PATCH":
request.Header.Add("If-Match", "*")
request.Header.Add("Content-Type", "application/json")
case "POST":
request.Header.Add("Content-Type", "application/json")
case "PUT":
request.Header.Add("Content-Type", "text/plain")
}
for _, header := range headers {
request.Header.Add(header.key, header.value)
}
response, err := client.Do(request)
if err != nil {
// the actual request failed
return nil, err
}
body, _ := ioutil.ReadAll(response.Body)
response.Body.Close()
if response.StatusCode == 401 {
var err graphError
json.Unmarshal(body, &err)
log.Warn().
Str("code", err.Error.Code).
Str("message", err.Error.Message).
Msg("Authentication token invalid or new app permissions required, " +
"forcing reauth before retrying.")
reauth := newAuth(auth.AuthConfig, auth.path, false)
mergo.Merge(auth, reauth, mergo.WithOverride)
request.Header.Set("Authorization", "bearer "+auth.AccessToken)
}
if response.StatusCode >= 500 || response.StatusCode == 401 {
// the onedrive API is having issues, retry once
response, err = client.Do(request)
if err != nil {
return nil, err
}
body, _ = ioutil.ReadAll(response.Body)
response.Body.Close()
}
if response.StatusCode >= 400 {
// something was wrong with the request
var err graphError
json.Unmarshal(body, &err)
return nil, fmt.Errorf("HTTP %d - %s: %s",
response.StatusCode, err.Error.Code, err.Error.Message)
}
return body, nil
}
// Get is a convenience wrapper around Request
func Get(resource string, auth *Auth, headers ...Header) ([]byte, error) {
return Request(resource, auth, "GET", nil, headers...)
}
// Patch is a convenience wrapper around Request
func Patch(resource string, auth *Auth, content io.Reader, headers ...Header) ([]byte, error) {
return Request(resource, auth, "PATCH", content, headers...)
}
// Post is a convenience wrapper around Request
func Post(resource string, auth *Auth, content io.Reader, headers ...Header) ([]byte, error) {
return Request(resource, auth, "POST", content, headers...)
}
// Put is a convenience wrapper around Request
func Put(resource string, auth *Auth, content io.Reader, headers ...Header) ([]byte, error) {
return Request(resource, auth, "PUT", content, headers...)
}
// Delete performs an HTTP delete
func Delete(resource string, auth *Auth, headers ...Header) error {
_, err := Request(resource, auth, "DELETE", nil, headers...)
return err
}
// IDPath computes the resource path for an item by ID
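//
//	IDPath("root") // -> "/me/drive/root"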
func IDPath(id string) string {
if id == "root" {
return "/me/drive/root"
}
return "/me/drive/items/" + url.PathEscape(id)
}
// ResourcePath translates an item's path to the proper path used by Graph
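//
//	ResourcePath("/some path/here!") // -> "/me/drive/root:%2Fsome%20path%2Fhere%21"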
func ResourcePath(path string) string {
if path == "/" {
return "/me/drive/root"
}
return "/me/drive/root:" + url.PathEscape(path)
}
// childrenPath returns the path to an item's children
func childrenPath(path string) string {
if path == "/" {
return ResourcePath(path) + "/children"
}
return ResourcePath(path) + ":/children"
}
// childrenPathID returns the API resource path of an item's children
func childrenPathID(id string) string {
return fmt.Sprintf("/me/drive/items/%s/children", url.PathEscape(id))
}
// User represents the user. Currently only used to fetch the account email so
// we can display it in file managers with .xdg-volume-info
// https://docs.microsoft.com/en-ca/graph/api/user-get
type User struct {
UserPrincipalName string `json:"userPrincipalName"`
}
// GetUser fetches the current user details from the Graph API.
func GetUser(auth *Auth) (User, error) {
resp, err := Get("/me", auth)
user := User{}
if err == nil {
err = json.Unmarshal(resp, &user)
}
return user, err
}
// DriveQuota is used to parse the User's current storage quotas from the API
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/quota
type DriveQuota struct {
Deleted uint64 `json:"deleted"` // bytes in recycle bin
FileCount uint64 `json:"fileCount"` // unavailable on personal accounts
Remaining uint64 `json:"remaining"`
State string `json:"state"` // normal | nearing | critical | exceeded
Total uint64 `json:"total"`
Used uint64 `json:"used"`
}
// Drive has some general information about the user's OneDrive
// https://docs.microsoft.com/en-us/onedrive/developer/rest-api/resources/drive
type Drive struct {
ID string `json:"id"`
DriveType string `json:"driveType"` // personal | business | documentLibrary
Quota DriveQuota `json:"quota,omitempty"`
}
// GetDrive is used to fetch the details of the user's OneDrive.
func GetDrive(auth *Auth) (Drive, error) {
resp, err := Get("/me/drive", auth)
drive := Drive{}
if err != nil {
return drive, err
}
return drive, json.Unmarshal(resp, &drive)
}
// IsOffline checks if an error string from Request() is indicative of being offline.
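//
// A sketch of typical use:
//
//	if _, err := Get("/me/drive", auth); IsOffline(err) {
//		// network is unreachable, skip remote operations
//	}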
func IsOffline(err error) bool {
if err == nil {
return false
}
// our error messages from Request() will be prefixed with "HTTP ### -" if we actually
// got an HTTP response (indicating we are not offline)
rexp := regexp.MustCompile("HTTP [0-9]+ - ")
return !rexp.MatchString(err.Error())
}
onedriver-0.14.1/fs/graph/graph_test.go 0000664 0000000 0000000 00000001155 14513675524 0020010 0 ustar 00root root 0000000 0000000 package graph
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestResourcePath(t *testing.T) {
t.Parallel()
assert.Equal(t,
`/me/drive/root:%2Fsome%20path%2Fhere%21`,
ResourcePath("/some path/here!"),
"Escaped path was wrong.",
)
}
func TestRequestUnauthenticated(t *testing.T) {
t.Parallel()
badAuth := &Auth{
// Set a renewal 1 year in the future so we don't accidentally overwrite
// our auth tokens
ExpiresAt: time.Now().Unix() + 60*60*24*365,
}
_, err := Get("/me/drive/root", badAuth)
assert.Error(t, err, "An unauthenticated request was not handled as an error")
}
onedriver-0.14.1/fs/graph/hashes.go 0000664 0000000 0000000 00000003717 14513675524 0017131 0 ustar 00root root 0000000 0000000 package graph
import (
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"fmt"
"io"
"strings"
"github.com/jstaf/onedriver/fs/graph/quickxorhash"
)
// SHA256Hash returns the SHA256 hash of some data as an uppercase hex string
func SHA256Hash(data *[]byte) string {
return strings.ToUpper(fmt.Sprintf("%x", sha256.Sum256(*data)))
}
// SHA256HashStream hashes the contents of a stream, rewinding it to the start
// before and after reading.
func SHA256HashStream(reader io.ReadSeeker) string {
reader.Seek(0, 0)
hash := sha256.New()
io.Copy(hash, reader)
reader.Seek(0, 0)
return strings.ToUpper(fmt.Sprintf("%x", hash.Sum(nil)))
}
// SHA1Hash returns the SHA1 hash of some data as a string
func SHA1Hash(data *[]byte) string {
// the onedrive API returns SHA1 hashes in all caps, so we do too
return strings.ToUpper(fmt.Sprintf("%x", sha1.Sum(*data)))
}
// SHA1HashStream hashes the contents of a stream.
func SHA1HashStream(reader io.ReadSeeker) string {
reader.Seek(0, 0)
hash := sha1.New()
io.Copy(hash, reader)
reader.Seek(0, 0)
return strings.ToUpper(fmt.Sprintf("%x", hash.Sum(nil)))
}
// QuickXORHash computes the Microsoft-specific QuickXORHash. Reusing rclone's
// implementation until I get the chance to rewrite/add test cases to remove the
// dependency.
func QuickXORHash(data *[]byte) string {
hash := quickxorhash.Sum(*data)
return base64.StdEncoding.EncodeToString(hash[:])
}
// QuickXORHashStream hashes a stream.
func QuickXORHashStream(reader io.ReadSeeker) string {
reader.Seek(0, 0)
hash := quickxorhash.New()
io.Copy(hash, reader)
reader.Seek(0, 0)
return base64.StdEncoding.EncodeToString(hash.Sum(nil))
}
// VerifyChecksum checks to see if a DriveItem's checksum matches what it's
// supposed to be. This is less of a cryptographic check and more of a file
// integrity check.
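//
// Sketch:
//
//	ok := item.VerifyChecksum(QuickXORHash(&content))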
func (d *DriveItem) VerifyChecksum(checksum string) bool {
if len(checksum) == 0 || d.File == nil {
return false
}
return strings.EqualFold(d.File.Hashes.QuickXorHash, checksum)
}
// ETagIsMatch returns true if the etag matches the one in the DriveItem
func (d *DriveItem) ETagIsMatch(etag string) bool {
return d.ETag != "" && d.ETag == etag
}
onedriver-0.14.1/fs/graph/hashes_test.go 0000664 0000000 0000000 00000003327 14513675524 0020165 0 ustar 00root root 0000000 0000000 package graph
import (
"bytes"
"io"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func BenchmarkSHA1(b *testing.B) {
data, _ := os.ReadFile("dmel.fa")
for i := 0; i < b.N; i++ {
SHA1Hash(&data)
}
}
func BenchmarkSHA256(b *testing.B) {
data, _ := os.ReadFile("dmel.fa")
for i := 0; i < b.N; i++ {
SHA256Hash(&data)
}
}
func BenchmarkQuickXORHash(b *testing.B) {
data, _ := os.ReadFile("dmel.fa")
for i := 0; i < b.N; i++ {
QuickXORHash(&data)
}
}
func BenchmarkSHA1Stream(b *testing.B) {
data, _ := os.Open("dmel.fa")
for i := 0; i < b.N; i++ {
SHA1HashStream(data)
}
}
func BenchmarkSHA256Stream(b *testing.B) {
data, _ := os.Open("dmel.fa")
for i := 0; i < b.N; i++ {
SHA256HashStream(data)
}
}
func BenchmarkQuickXORHashStream(b *testing.B) {
data, _ := os.Open("dmel.fa")
for i := 0; i < b.N; i++ {
QuickXORHashStream(data)
}
}
func TestSha1HashReader(t *testing.T) {
content := []byte("this is some text to hash")
expected := SHA1Hash(&content)
reader := bytes.NewReader(content)
actual := SHA1HashStream(reader)
assert.Equal(t, expected, actual)
}
func TestQuickXORHashReader(t *testing.T) {
content := []byte("this is some text to hash")
expected := QuickXORHash(&content)
reader := bytes.NewReader(content)
actual := QuickXORHashStream(reader)
assert.Equal(t, expected, actual)
}
func TestHashSeekPosition(t *testing.T) {
tmp, err := os.CreateTemp("", "onedriverHashTest")
if err != nil {
t.Fatal(err) // cannot continue with a nil file handle
}
content := []byte("some test content")
io.Copy(tmp, bytes.NewBuffer(content))
assert.Equal(t, QuickXORHash(&content), QuickXORHashStream(tmp))
assert.Equal(t, SHA1Hash(&content), SHA1HashStream(tmp))
assert.Equal(t, SHA256Hash(&content), SHA256HashStream(tmp))
}
onedriver-0.14.1/fs/graph/oauth2.go 0000664 0000000 0000000 00000017207 14513675524 0017057 0 ustar 00root root 0000000 0000000 package graph
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"regexp"
"strings"
"time"
"github.com/imdario/mergo"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
// these are default values if not specified
const (
authClientID = "3470c3fa-bc10-45ab-a0a9-2d30836485d1"
authCodeURL = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
authTokenURL = "https://login.microsoftonline.com/common/oauth2/v2.0/token"
authRedirectURL = "https://login.live.com/oauth20_desktop.srf"
)
func (a *AuthConfig) applyDefaults() error {
return mergo.Merge(a, AuthConfig{
ClientID: authClientID,
CodeURL: authCodeURL,
TokenURL: authTokenURL,
RedirectURL: authRedirectURL,
})
}
// AuthConfig configures the authentication flow
type AuthConfig struct {
ClientID string `json:"clientID" yaml:"clientID"`
CodeURL string `json:"codeURL" yaml:"codeURL"`
TokenURL string `json:"tokenURL" yaml:"tokenURL"`
RedirectURL string `json:"redirectURL" yaml:"redirectURL"`
}
// Auth represents a set of oauth2 authentication tokens
type Auth struct {
AuthConfig `json:"config"`
Account string `json:"account"`
ExpiresIn int64 `json:"expires_in"` // only used for parsing
ExpiresAt int64 `json:"expires_at"`
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
path string // auth tokens remember their path for use by Refresh()
}
// AuthError is an authentication error from the Microsoft API. Generally we don't see
// these unless something goes catastrophically wrong with Microsoft's authentication
// services.
type AuthError struct {
Error string `json:"error"`
ErrorDescription string `json:"error_description"`
ErrorCodes []int `json:"error_codes"`
ErrorURI string `json:"error_uri"`
Timestamp string `json:"timestamp"` // json.Unmarshal doesn't like this timestamp format
TraceID string `json:"trace_id"`
CorrelationID string `json:"correlation_id"`
}
// ToFile writes auth tokens to a file
func (a Auth) ToFile(file string) error {
a.path = file
byteData, _ := json.Marshal(a)
return ioutil.WriteFile(file, byteData, 0600)
}
// FromFile populates an auth struct from a file
func (a *Auth) FromFile(file string) error {
contents, err := ioutil.ReadFile(file)
if err != nil {
return err
}
a.path = file
err = json.Unmarshal(contents, a)
if err != nil {
return err
}
return a.applyDefaults()
}
// Refresh auth tokens if expired.
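// Tokens are considered expired once ExpiresAt is at or before the current
// time; renewal POSTs the refresh token to the token endpoint, falling back to
// a full reauthentication if no usable tokens come back.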
func (a *Auth) Refresh() {
if a.ExpiresAt <= time.Now().Unix() {
oldTime := a.ExpiresAt
postData := strings.NewReader("client_id=" + a.ClientID +
"&redirect_uri=" + a.RedirectURL +
"&refresh_token=" + a.RefreshToken +
"&grant_type=refresh_token")
resp, err := http.Post(a.TokenURL,
"application/x-www-form-urlencoded",
postData)
var reauth bool
if err != nil {
if IsOffline(err) || resp == nil {
log.Trace().Err(err).Msg("Network unreachable during token renewal, ignoring.")
return
}
log.Error().Err(err).Msg("Could not POST to renew tokens, forcing reauth.")
reauth = true
} else {
// put here so as to avoid spamming the log when offline
log.Info().Msg("Auth tokens expired, attempting renewal.")
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
json.Unmarshal(body, &a)
if a.ExpiresAt == oldTime {
a.ExpiresAt = time.Now().Unix() + a.ExpiresIn
}
if reauth || a.AccessToken == "" || a.RefreshToken == "" {
log.Error().
Bytes("response", body).
Int("http_code", resp.StatusCode).
Msg("Failed to renew access tokens. Attempting to reauthenticate.")
// copy the new tokens into the receiver, otherwise they would be lost
// (ToFile has a value receiver, so the returned struct's path is unset)
newTokens := newAuth(a.AuthConfig, a.path, false)
newTokens.path = a.path
*a = *newTokens
} else {
a.ToFile(a.path)
}
}
}
// getAuthURL returns the appropriate authentication URL for the Graph OAuth2 challenge.
func getAuthURL(a AuthConfig) string {
return a.CodeURL +
"?client_id=" + a.ClientID +
"&scope=" + url.PathEscape("user.read files.readwrite.all offline_access") +
"&response_type=code" +
"&redirect_uri=" + a.RedirectURL
}
// getAuthCodeHeadless has the user perform authentication in their own browser
// instead of WebKit2GTK and then input the auth code in the terminal.
func getAuthCodeHeadless(a AuthConfig, accountName string) string {
fmt.Printf("Please visit the following URL:\n%s\n\n", getAuthURL(a))
fmt.Println("Please enter the redirect URL once you are redirected to a " +
"blank page (after \"Let this app access your info?\"):")
var response string
fmt.Scanln(&response)
code, err := parseAuthCode(response)
if err != nil {
log.Fatal().Msg("No validation code returned, or code was invalid. " +
"Please restart the application and try again.")
}
return code
}
// parseAuthCode is used to parse the auth code out of the redirect the server gives us
// after successful authentication
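//
// For example (using the faked personal auth code from oauth2_test.go):
//
//	code, _ := parseAuthCode("https://login.live.com/oauth20_desktop.srf?code=M.R3_BAY.abcd526-817f-d8e9-590c-1227b45c7be2&lc=4105")
//	// code == "M.R3_BAY.abcd526-817f-d8e9-590c-1227b45c7be2"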
func parseAuthCode(url string) (string, error) {
rexp := regexp.MustCompile("code=([a-zA-Z0-9-_.])+")
code := rexp.FindString(url)
if len(code) == 0 {
return "", errors.New("invalid auth code")
}
return code[5:], nil
}
// getAuthTokens exchanges an auth code for a set of access tokens (returned as a new Auth struct).
func getAuthTokens(a AuthConfig, authCode string) *Auth {
postData := strings.NewReader("client_id=" + a.ClientID +
"&redirect_uri=" + a.RedirectURL +
"&code=" + authCode +
"&grant_type=authorization_code")
resp, err := http.Post(a.TokenURL,
"application/x-www-form-urlencoded",
postData)
if err != nil {
log.Fatal().Err(err).Msg("Could not POST to obtain auth tokens.")
}
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
var auth Auth
json.Unmarshal(body, &auth)
if auth.ExpiresAt == 0 {
auth.ExpiresAt = time.Now().Unix() + auth.ExpiresIn
}
auth.AuthConfig = a
if auth.AccessToken == "" || auth.RefreshToken == "" {
var authErr AuthError
var fields zerolog.Logger
if err := json.Unmarshal(body, &authErr); err == nil {
// we got a parseable error message out of microsoft's servers
fields = log.With().
Int("status", resp.StatusCode).
Str("error", authErr.Error).
Str("errorDescription", authErr.ErrorDescription).
Str("helpUrl", authErr.ErrorURI).
Logger()
} else {
// things are extra broken and this is an error type we haven't seen before
fields = log.With().
Int("status", resp.StatusCode).
Bytes("response", body).
Err(err).
Logger()
}
fields.Fatal().Msg(
"Failed to retrieve access tokens. Authentication cannot continue.",
)
}
return &auth
}
// newAuth performs initial authentication flow and saves tokens to disk. The headless
// parameter determines if we will try to auth directly in the terminal instead of
// doing it via embedded browser.
func newAuth(config AuthConfig, path string, headless bool) *Auth {
// load the old account name
old := Auth{}
old.FromFile(path)
config.applyDefaults()
var code string
if headless {
code = getAuthCodeHeadless(config, old.Account)
} else {
// in a build without CGO, this will be the same as above
code = getAuthCode(config, old.Account)
}
auth := getAuthTokens(config, code)
if user, err := GetUser(auth); err == nil {
auth.Account = user.UserPrincipalName
}
auth.ToFile(path)
return auth
}
// Authenticate performs authentication to Graph or load auth/refreshes it
// from an existing file. If headless is true, we will authenticate in the
// terminal.
func Authenticate(config AuthConfig, path string, headless bool) *Auth {
auth := &Auth{}
_, err := os.Stat(path)
if os.IsNotExist(err) {
// no tokens found, gotta start oauth flow from beginning
auth = newAuth(config, path, headless)
} else {
// we already have tokens, no need to force a new auth flow
auth.FromFile(path)
auth.Refresh()
}
return auth
}
onedriver-0.14.1/fs/graph/oauth2_gtk.c 0000664 0000000 0000000 00000014523 14513675524 0017537 0 ustar 00root root 0000000 0000000 #include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <webkit2/webkit2.h>
/**
* Get the host from a URI
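*
* Example (illustrative): uri_get_host("https://account.live.com/test/index.html")
* returns "account.live.com". The caller owns and must free the returned string.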
*/
char *uri_get_host(char *uri) {
if (!uri || strlen(uri) == 1) {
return NULL;
}
int start = 0;
for (int i = 1; i < strlen(uri); i++) {
if (uri[i] != '/') {
// only care about "/"
continue;
}
if (uri[i - 1] == '/') {
// we're at the "//" in "https://"
start = i + 1;
} else if (start > 0) {
int len = i - start;
char *host = malloc(len + 1); // +1 for the NUL terminator written below
strncpy(host, uri + start, len);
host[len] = '\0';
return host;
}
}
if (start > 0) {
return strdup(uri + start);
}
return NULL;
}
/**
* Exit the main loop when the window is destroyed.
*/
static void destroy_window(GtkWidget *widget, gpointer data) { gtk_main_quit(); }
/**
* Handle TLS errors with the microsoft side of things.
*/
static gboolean web_view_load_failed_tls(WebKitWebView *web_view, char *failing_uri,
GTlsCertificate *certificate,
GTlsCertificateFlags errors,
gpointer user_data) {
char fallback[256]; // backing storage for the formatted default message
char *reason;
switch (errors) {
case 0:
reason = "No error - There was no error verifying the certificate.";
break;
case G_TLS_CERTIFICATE_UNKNOWN_CA:
reason = "G_TLS_CERTIFICATE_UNKNOWN_CA - The signing certificate authority is "
"not known.";
break;
case G_TLS_CERTIFICATE_BAD_IDENTITY:
reason = "G_TLS_CERTIFICATE_BAD_IDENTITY - The certificate does not match the "
"expected identity of the site that it was retrieved from.";
break;
case G_TLS_CERTIFICATE_NOT_ACTIVATED:
reason = "G_TLS_CERTIFICATE_NOT_ACTIVATED - The certificate's activation time is "
"still in the future.";
break;
case G_TLS_CERTIFICATE_EXPIRED:
reason = "G_TLS_CERTIFICATE_EXPIRED - The certificate has expired.";
break;
case G_TLS_CERTIFICATE_REVOKED:
reason = "G_TLS_CERTIFICATE_REVOKED - The certificate has been revoked according "
"to the GTlsConnection's certificate revocation list.";
break;
case G_TLS_CERTIFICATE_INSECURE:
reason = "G_TLS_CERTIFICATE_INSECURE - The certificate's algorithm is considered "
"insecure.";
break;
case G_TLS_CERTIFICATE_GENERIC_ERROR:
reason = "G_TLS_CERTIFICATE_GENERIC_ERROR - Some other error occurred validating "
"the certificate.";
break;
default:
snprintf(fallback, sizeof(fallback),
"Multiple failures (%d) - There were multiple errors during certificate "
"verification.",
errors);
reason = fallback;
break;
}
g_print("Webkit load failed with TLS errors for %s : %s\n", failing_uri, reason);
// something is up with Fedora 35's verification of this particular cert,
// so we specifically only allow G_TLS_CERTIFICATE_GENERIC_ERROR for only this cert.
char *host = uri_get_host(failing_uri);
if ((errors & G_TLS_CERTIFICATE_GENERIC_ERROR) && host &&
strncmp("account.live.com", host, 17) == 0) {
WebKitWebContext *context = webkit_web_view_get_context(web_view);
// allow these failing domains from the webpage and reload
webkit_web_context_allow_tls_certificate_for_host(context, certificate,
"account.live.com");
webkit_web_context_allow_tls_certificate_for_host(context, certificate,
"acctcdn.msauth.net");
webkit_web_context_allow_tls_certificate_for_host(context, certificate,
"acctcdn.msftauth.net");
g_print("Ignoring G_TLS_CERTIFICATE_GENERIC_ERROR for this certificate as a "
"workaround for https://bugzilla.redhat.com/show_bug.cgi?id=2024296 - "
"reloading page.\n");
webkit_web_view_reload(web_view);
free(host);
return true;
}
free(host);
return false;
}
/**
* Catch redirects once authentication completes.
*/
static void web_view_load_changed(WebKitWebView *web_view, WebKitLoadEvent load_event,
char *auth_redirect_url_ptr) {
static const char *auth_complete_url = "https://login.live.com/oauth20_desktop.srf";
const char *url = webkit_web_view_get_uri(web_view);
if (load_event == WEBKIT_LOAD_REDIRECTED &&
strncmp(auth_complete_url, url, strlen(auth_complete_url)) == 0) {
// catch redirects to the oauth2 redirect only and destroy the window
strncpy(auth_redirect_url_ptr, url, 2047);
auth_redirect_url_ptr[2047] = '\0'; // strncpy does not terminate when truncating
GtkWidget *parent = gtk_widget_get_parent(GTK_WIDGET(web_view));
gtk_widget_destroy(parent);
}
}
/**
* Open a popup GTK auth window and return the final redirect location.
*/
char *webkit_auth_window(char *auth_url, char *account_name) {
gtk_init(NULL, NULL);
GtkWidget *auth_window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
if (account_name && strlen(account_name) > 0) {
char title[512];
snprintf(title, 511, "onedriver (%s)", account_name);
gtk_window_set_title(GTK_WINDOW(auth_window), title);
gtk_window_set_default_size(GTK_WINDOW(auth_window), 525, 600);
} else {
gtk_window_set_title(GTK_WINDOW(auth_window), "onedriver");
gtk_window_set_default_size(GTK_WINDOW(auth_window), 450, 600);
}
// create browser and add to gtk window
WebKitWebView *web_view = WEBKIT_WEB_VIEW(webkit_web_view_new());
gtk_container_add(GTK_CONTAINER(auth_window), GTK_WIDGET(web_view));
webkit_web_view_load_uri(web_view, auth_url);
char auth_redirect_value[2048];
auth_redirect_value[0] = '\0';
g_signal_connect(web_view, "load-changed", G_CALLBACK(web_view_load_changed),
&auth_redirect_value);
g_signal_connect(web_view, "load-failed-with-tls-errors",
G_CALLBACK(web_view_load_failed_tls), NULL);
g_signal_connect(auth_window, "destroy", G_CALLBACK(destroy_window), web_view);
// show and grab focus
gtk_widget_grab_focus(GTK_WIDGET(web_view));
gtk_widget_show_all(auth_window);
gtk_main();
return strdup(auth_redirect_value);
}
onedriver-0.14.1/fs/graph/oauth2_gtk.go 0000664 0000000 0000000 00000002445 14513675524 0017722 0 ustar 00root root 0000000 0000000 //go:build linux && cgo
// +build linux,cgo
package graph
/*
#cgo linux pkg-config: webkit2gtk-4.1
#include "stdlib.h"
#include "oauth2_gtk.h"
*/
import "C"
import (
"unsafe"
"github.com/rs/zerolog/log"
)
// Fetch the auth code required as the first part of oauth2 authentication. Uses
// webkit2gtk to create a popup browser.
func getAuthCode(a AuthConfig, accountName string) string {
cAuthURL := C.CString(getAuthURL(a))
cAccountName := C.CString(accountName)
cResponse := C.webkit_auth_window(cAuthURL, cAccountName)
response := C.GoString(cResponse)
C.free(unsafe.Pointer(cAuthURL))
C.free(unsafe.Pointer(cAccountName))
C.free(unsafe.Pointer(cResponse))
code, err := parseAuthCode(response)
if err != nil {
//TODO create a popup with the auth failure message here instead of a log message
log.Fatal().Msg("No validation code returned, or code was invalid. " +
"Please restart the application and try again.")
}
return code
}
// uriGetHost exists solely for testing: test files cannot use CGo directly,
// but they can call functions (like this one) that invoke CGo.
func uriGetHost(uri string) string {
input := C.CString(uri)
defer C.free(unsafe.Pointer(input))
host := C.uri_get_host(input)
defer C.free(unsafe.Pointer(host))
if host == nil {
return ""
}
return C.GoString(host)
}
onedriver-0.14.1/fs/graph/oauth2_gtk.h 0000664 0000000 0000000 00000000153 14513675524 0017536 0 ustar 00root root 0000000 0000000 #pragma once
char *uri_get_host(char *uri);
char *webkit_auth_window(char *auth_url, char *account_name);
onedriver-0.14.1/fs/graph/oauth2_gtk_test.go 0000664 0000000 0000000 00000001037 14513675524 0020755 0 ustar 00root root 0000000 0000000 //go:build linux && cgo
// +build linux,cgo
package graph
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestURIGetHost(t *testing.T) {
host := uriGetHost("this won't work")
assert.Equal(t, "", host, "Func should return NULL if not a valid URI")
host = uriGetHost("https://account.live.com/test/index.html")
assert.Equal(t, "account.live.com", host, "Failed URI host with extra path.")
host = uriGetHost("http://account.live.com")
assert.Equal(t, "account.live.com", host, "Failed URI host without extra path")
}
onedriver-0.14.1/fs/graph/oauth2_headless.go 0000664 0000000 0000000 00000000412 14513675524 0020715 0 ustar 00root root 0000000 0000000 //go:build !linux || !cgo
// +build !linux !cgo
package graph
// accountName arg is only present for compatibility with the non-headless C version.
func getAuthCode(config AuthConfig, accountName string) string {
return getAuthCodeHeadless(config, accountName)
}
onedriver-0.14.1/fs/graph/oauth2_test.go 0000664 0000000 0000000 00000006435 14513675524 0020117 0 ustar 00root root 0000000 0000000 package graph
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestAuthCodeFormat(t *testing.T) {
// arbitrary regex test
code, err := parseAuthCode("codecode=asd-965198osATYjfb._knlwoieurow*sdjf")
assert.NoError(t, err)
assert.Equal(t, "asd-965198osATYjfb._knlwoieurow", code,
"Auth code parser did not succeed against arbitrary character test.")
// faked personal auth code
code, err = parseAuthCode("https://login.live.com/oauth20_desktop.srf?code=M.R3_BAY.abcd526-817f-d8e9-590c-1227b45c7be2&lc=4105")
assert.NoError(t, err)
assert.Equal(t, "M.R3_BAY.abcd526-817f-d8e9-590c-1227b45c7be2", code,
"Personal auth code did not match expected result.")
// faked business auth code
code, err = parseAuthCode("https://login.live.com/oauth20_desktop.srf?code=0.BAAA-AeXRPDP_sEe7XktwiDweriowjeirjcDQQvKtFoKktMINkhdEzAAA.AQABAAAAARWERAB2UyzwtQEKR7-rWbgdcBZICdWKCJnfnPJurxUN_QbF3GS6OQqQiK987AbLAv2QykQMIGAz4XCvkO8kB3XC8RYV10qmnmHcMUgo7u5UubpgpR3OW3TVlMSZ-3vxjkcEHlsnVoBqfUFdcj8fYR_mP6w0xkB8MmLG3i5F-JtcaLKfQu13941lsdjkfdh0acjHBGJHVzpBbuiVfzN6vMygFiS2xAQGF668M_l69dXRmG1tq3ZwU6J0-FWYNfK_Ro4YS2m38bcNmZQ8iEolV78t34HKxCYZnl4iqeYF7b7hkTM7ZIcsDBoeZvW1Cu6dIQ7xC4NZGILltOXY5V6A-kcLCZaYuSFW_R8dEM-cqGr_5Gv1GhgfqyXd-2XYNvGda9ok20JrYEmMiezfnyRV-vc7rdtlLOVI_ubzhrjezAvtAApPEj3dJdcmW_0qns_R27pVDlU1xkDagQAquhrftE_sZHbRGvnAsdfaoim1SjcX7QosTELyoWeAczip4MPYqmJ1uVjpWb533vA5WZMyWatiDuNYhnj48SsfEP2zaUQFU55Aj90hEOhOPl77AOu0-zNfAGXeWAQhTPO2rZ0ZgHottFwLoq8aA52sTW-hf7kB0chFUaUvLkxKr1L-Zi7vyCBoArlciFV3zyMxiQ8kjR3vxfwlerjowicmcgqJD-8lxioiwerwlbrlQWyAA&session_state=3fa7b212-7dbb-44e6-bddd-812fwieojw914341")
assert.NoError(t, err)
if code != "0.BAAA-AeXRPDP_sEe7XktwiDweriowjeirjcDQQvKtFoKktMINkhdEzAAA.AQABAAAAARWERAB2UyzwtQEKR7-rWbgdcBZICdWKCJnfnPJurxUN_QbF3GS6OQqQiK987AbLAv2QykQMIGAz4XCvkO8kB3XC8RYV10qmnmHcMUgo7u5UubpgpR3OW3TVlMSZ-3vxjkcEHlsnVoBqfUFdcj8fYR_mP6w0xkB8MmLG3i5F-JtcaLKfQu13941lsdjkfdh0acjHBGJHVzpBbuiVfzN6vMygFiS2xAQGF668M_l69dXRmG1tq3ZwU6J0-FWYNfK_Ro4YS2m38bcNmZQ8iEolV78t34HKxCYZnl4iqeYF7b7hkTM7ZIcsDBoeZvW1Cu6dIQ7xC4NZGILltOXY5V6A-kcLCZaYuSFW_R8dEM-cqGr_5Gv1GhgfqyXd-2XYNvGda9ok20JrYEmMiezfnyRV-vc7rdtlLOVI_ubzhrjezAvtAApPEj3dJdcmW_0qns_R27pVDlU1xkDagQAquhrftE_sZHbRGvnAsdfaoim1SjcX7QosTELyoWeAczip4MPYqmJ1uVjpWb533vA5WZMyWatiDuNYhnj48SsfEP2zaUQFU55Aj90hEOhOPl77AOu0-zNfAGXeWAQhTPO2rZ0ZgHottFwLoq8aA52sTW-hf7kB0chFUaUvLkxKr1L-Zi7vyCBoArlciFV3zyMxiQ8kjR3vxfwlerjowicmcgqJD-8lxioiwerwlbrlQWyAA" {
t.Error("Business auth code did not match expected result.")
}
}
func TestAuthFromfile(t *testing.T) {
t.Parallel()
require.FileExists(t, ".auth_tokens.json")
var auth Auth
auth.FromFile(".auth_tokens.json")
assert.NotEqual(t, "", auth.AccessToken, "Could not load auth tokens from '.auth_tokens.json'!")
}
func TestAuthRefresh(t *testing.T) {
t.Parallel()
require.FileExists(t, ".auth_tokens.json")
var auth Auth
auth.FromFile(".auth_tokens.json")
auth.ExpiresAt = 0 // force an auth refresh
auth.Refresh()
if auth.ExpiresAt <= time.Now().Unix() {
t.Fatal("Auth could not be refreshed successfully!")
}
}
func TestAuthConfigMerge(t *testing.T) {
t.Parallel()
testConfig := AuthConfig{RedirectURL: "test"}
assert.NoError(t, testConfig.applyDefaults())
assert.Equal(t, "test", testConfig.RedirectURL)
assert.Equal(t, authClientID, testConfig.ClientID)
}
onedriver-0.14.1/fs/graph/quickxorhash/ 0000775 0000000 0000000 00000000000 14513675524 0020030 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/fs/graph/quickxorhash/COPYING 0000664 0000000 0000000 00000002107 14513675524 0021063 0 ustar 00root root 0000000 0000000 Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
onedriver-0.14.1/fs/graph/quickxorhash/quickxorhash.go 0000664 0000000 0000000 00000014214 14513675524 0023072 0 ustar 00root root 0000000 0000000 // Package quickxorhash provides the quickXorHash algorithm which is a
// quick, simple non-cryptographic hash algorithm that works by XORing
// the bytes in a circular-shifting fashion.
//
// It is used by Microsoft Onedrive for Business to hash data.
//
// See: https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
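//
// Usage sketch:
//
//	h := New()
//	_, _ = h.Write(data)
//	digest := h.Sum(nil) // 20-byte checksum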
package quickxorhash
// This code was ported from the code snippet linked from
// https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash
// Which has the copyright
// ------------------------------------------------------------------------------
// Copyright (c) 2016 Microsoft Corporation
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
// ------------------------------------------------------------------------------
import (
"hash"
)
const (
// BlockSize is the preferred size for hashing
BlockSize = 64
// Size of the output checksum
Size = 20
bitsInLastCell = 32
shift = 11
widthInBits = 8 * Size
dataSize = (widthInBits-1)/64 + 1
)
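// With Size = 20 the state is widthInBits = 160 bits wide, stored in
// dataSize = 3 uint64 cells; only the low bitsInLastCell = 32 bits of the
// final cell are used (2*64 + 32 = 160).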
type quickXorHash struct {
data [dataSize]uint64
lengthSoFar uint64
shiftSoFar int
}
// New returns a new hash.Hash computing the quickXorHash checksum.
func New() hash.Hash {
return &quickXorHash{}
}
// Write (via the embedded io.Writer interface) adds more data to the running hash.
// It never returns an error.
//
// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (q *quickXorHash) Write(p []byte) (n int, err error) {
currentshift := q.shiftSoFar
// The bitvector where we'll start xoring
vectorArrayIndex := currentshift / 64
// The position within the bit vector at which we begin xoring
vectorOffset := currentshift % 64
iterations := len(p)
if iterations > widthInBits {
iterations = widthInBits
}
for i := 0; i < iterations; i++ {
isLastCell := vectorArrayIndex == len(q.data)-1
var bitsInVectorCell int
if isLastCell {
bitsInVectorCell = bitsInLastCell
} else {
bitsInVectorCell = 64
}
// There's at least 2 bitvectors before we reach the end of the array
if vectorOffset <= bitsInVectorCell-8 {
for j := i; j < len(p); j += widthInBits {
q.data[vectorArrayIndex] ^= uint64(p[j]) << uint(vectorOffset)
}
} else {
index1 := vectorArrayIndex
var index2 int
if isLastCell {
index2 = 0
} else {
index2 = vectorArrayIndex + 1
}
low := byte(bitsInVectorCell - vectorOffset)
xoredByte := byte(0)
for j := i; j < len(p); j += widthInBits {
xoredByte ^= p[j]
}
q.data[index1] ^= uint64(xoredByte) << uint(vectorOffset)
q.data[index2] ^= uint64(xoredByte) >> low
}
vectorOffset += shift
for vectorOffset >= bitsInVectorCell {
if isLastCell {
vectorArrayIndex = 0
} else {
vectorArrayIndex = vectorArrayIndex + 1
}
vectorOffset -= bitsInVectorCell
}
}
// Update the starting position in a circular shift pattern
q.shiftSoFar = (q.shiftSoFar + shift*(len(p)%widthInBits)) % widthInBits
q.lengthSoFar += uint64(len(p))
return len(p), nil
}
// Calculate the current checksum
func (q *quickXorHash) checkSum() (h [Size]byte) {
// Output the data as little endian bytes
ph := 0
for i := 0; i < len(q.data)-1; i++ {
d := q.data[i]
_ = h[ph+7] // bounds check
h[ph+0] = byte(d >> (8 * 0))
h[ph+1] = byte(d >> (8 * 1))
h[ph+2] = byte(d >> (8 * 2))
h[ph+3] = byte(d >> (8 * 3))
h[ph+4] = byte(d >> (8 * 4))
h[ph+5] = byte(d >> (8 * 5))
h[ph+6] = byte(d >> (8 * 6))
h[ph+7] = byte(d >> (8 * 7))
ph += 8
}
// remaining 32 bits
d := q.data[len(q.data)-1]
h[Size-4] = byte(d >> (8 * 0))
h[Size-3] = byte(d >> (8 * 1))
h[Size-2] = byte(d >> (8 * 2))
h[Size-1] = byte(d >> (8 * 3))
// XOR the file length with the least significant bits in little endian format
d = q.lengthSoFar
h[Size-8] ^= byte(d >> (8 * 0))
h[Size-7] ^= byte(d >> (8 * 1))
h[Size-6] ^= byte(d >> (8 * 2))
h[Size-5] ^= byte(d >> (8 * 3))
h[Size-4] ^= byte(d >> (8 * 4))
h[Size-3] ^= byte(d >> (8 * 5))
h[Size-2] ^= byte(d >> (8 * 6))
h[Size-1] ^= byte(d >> (8 * 7))
return h
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (q *quickXorHash) Sum(b []byte) []byte {
hash := q.checkSum()
return append(b, hash[:]...)
}
// Reset resets the Hash to its initial state.
func (q *quickXorHash) Reset() {
*q = quickXorHash{}
}
// Size returns the number of bytes Sum will return.
func (q *quickXorHash) Size() int {
return Size
}
// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (q *quickXorHash) BlockSize() int {
return BlockSize
}
// Sum returns the quickXorHash checksum of the data.
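//
//	digest := Sum(data) // digest is a [Size]byte array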
func Sum(data []byte) [Size]byte {
var d quickXorHash
_, _ = d.Write(data)
return d.checkSum()
}
onedriver-0.14.1/fs/graph/quickxorhash/quickxorhash_test.go 0000664 0000000 0000000 00000021472 14513675524 0024135 0 ustar 00root root 0000000 0000000 package quickxorhash
import (
"encoding/base64"
"fmt"
"hash"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var testVectors = []struct {
size int
in string
out string
}{
{0, ``, "AAAAAAAAAAAAAAAAAAAAAAAAAAA="},
{1, `Sg==`, "SgAAAAAAAAAAAAAAAQAAAAAAAAA="},
{2, `tbQ=`, "taAFAAAAAAAAAAAAAgAAAAAAAAA="},
{3, `0pZP`, "0rDEEwAAAAAAAAAAAwAAAAAAAAA="},
{4, `jRRDVA==`, "jaDAEKgAAAAAAAAABAAAAAAAAAA="},
{5, `eAV52qE=`, "eChAHrQRCgAAAAAABQAAAAAAAAA="},
{6, `luBZlaT6`, "lgBHFipBCn0AAAAABgAAAAAAAAA="},
{7, `qaApEj66lw==`, "qQBFCiTgA11cAgAABwAAAAAAAAA="},
{8, `/aNzzCFPS/A=`, "/RjFHJgRgicsAR4ACAAAAAAAAAA="},
{9, `n6Neh7p6fFgm`, "nxiFFw6hCz3wAQsmCQAAAAAAAAA="},
{10, `J9iPGCbfZSTNyw==`, "J8DGIzBggm+UgQTNUgYAAAAAAAA="},
{11, `i+UZyUGJKh+ISbk=`, "iyhHBpIRhESo4AOIQ0IuAAAAAAA="},
{12, `h490d57Pqz5q2rtT`, "h3gEHe7giWeswgdq3MYupgAAAAA="},
{13, `vPgoDjOfO6fm71RxLw==`, "vMAHChwwg0/s4BTmdQcV4vACAAA="},
{14, `XoJ1AsoR4fDYJrDqYs4=`, "XhBEHQSgjAiEAx7YPgEs1CEGZwA="},
{15, `gQaybEqS/4UlDc8e4IJm`, "gDCALNigBEn8oxAlZ8AzPAAOQZg="},
{16, `2fuxhBJXtpWFe8dOfdGeHw==`, "O9tHLAghgSvYohKFyMMxnNCHaHg="},
{17, `XBV6YKU9V7yMakZnFIxIkuU=`, "HbplHsBQih5cgReMQYMRzkABRiA="},
{18, `XJZSOiNO2bmfKnTKD7fztcQX`, "/6ZArHQwAidkIxefQgEdlPGAW8w="},
{19, `g8VtAh+2Kf4k0kY5tzji2i2zmA==`, "wDNrgwHWAVukwB8kg4YRcnALHIg="},
{20, `T6LYJIfDh81JrAK309H2JMJTXis=`, "zBTHrspn3mEcohlJdIUAbjGNaNg="},
{21, `DWAAX5/CIfrmErgZa8ot6ZraeSbu`, "LR2Z0PjuRYGKQB/mhQAuMrAGZbQ="},
{22, `N9abi3qy/mC1THZuVLHPpx7SgwtLOA==`, "1KTYttCBEen8Hwy1doId3ECFWDw="},
{23, `LlUe7wHerLqEtbSZLZgZa9u0m7hbiFs=`, "TqVZpxs3cN61BnuFvwUtMtECTGQ="},
{24, `bU2j/0XYdgfPFD4691jV0AOUEUPR4Z5E`, "bnLBiLpVgnxVkXhNsIAPdHAPLFQ="},
{25, `lScPwPsyUsH2T1Qsr31wXtP55Wqbe47Uyg==`, "VDMSy8eI26nBHCB0e8gVWPCKPsA="},
{26, `rJaKh1dLR1k+4hynliTZMGf8Nd4qKKoZiAM=`, "r7bjwkl8OYQeNaMcCY8fTmEJEmQ="},
{27, `pPsT0CPmHrd3Frsnva1pB/z1ytARLeHEYRCo`, "Rdg7rCcDomL59pL0s6GuTvqLVqQ="},
{28, `wSRChaqmrsnMrfB2yqI43eRWbro+f9kBvh+01w==`, "YTtloIi6frI7HX3vdLvE7I2iUOA="},
{29, `apL67KMIRxQeE9k1/RuW09ppPjbF1WeQpTjSWtI=`, "CIpedls+ZlSQ654fl+X26+Q7LVU="},
{30, `53yx0/QgMTVb7OOzHRHbkS7ghyRc+sIXxi7XHKgT`, "zfJtLGFgR9DB3Q64fAFIp+S5iOY="},
{31, `PwXNnutoLLmxD8TTog52k8cQkukmT87TTnDipKLHQw==`, "PTaGs7yV3FUyBy/SfU6xJRlCJlI="},
{32, `NbYXsp5/K6mR+NmHwExjvWeWDJFnXTKWVlzYHoesp2E=`, "wjuAuWDiq04qDt1R8hHWDDcwVoQ="},
{33, `qQ70RB++JAR5ljNv3lJt1PpqETPsckopfonItu18Cr3E`, "FkJaeg/0Z5+euShYlLpE2tJh+Lo="},
{34, `RhzSatQTQ9/RFvpHyQa1WLdkr3nIk6MjJUma998YRtp44A==`, "SPN2D29reImAqJezlqV2DLbi8tk="},
{35, `DND1u1uZ5SqZVpRUk6NxSUdVo7IjjL9zs4A1evDNCDLcXWc=`, "S6lBk2hxI2SWBfn7nbEl7D19UUs="},
{36, `jEi62utFz69JMYHjg1iXy7oO6ZpZSLcVd2B+pjm6BGsv/CWi`, "s0lYU9tr/bp9xsnrrjYgRS5EvV8="},
{37, `hfS3DZZnhy0hv7nJdXLv/oJOtIgAuP9SInt/v8KeuO4/IvVh4A==`, "CV+HQCdd2A/e/vdi12f2UU55GLA="},
{38, `EkPQAC6ymuRrYjIXD/LT/4Vb+7aTjYVZOHzC8GPCEtYDP0+T3Nc=`, "kE9H9sEmr3vHBYUiPbvsrcDgSEo="},
{39, `vtBOGIENG7yQ/N7xNWPNIgy66Gk/I2Ur/ZhdFNUK9/1FCZuu/KeS`, "+Fgp3HBimtCzUAyiinj3pkarYTk="},
{40, `YnF4smoy9hox2jBlJ3VUa4qyCRhOZbWcmFGIiszTT4zAdYHsqJazyg==`, "arkIn+ELddmE8N34J9ydyFKW+9w="},
{41, `0n7nl3YJtipy6yeUbVPWtc2h45WbF9u8hTz5tNwj3dZZwfXWkk+GN3g=`, "YJLNK7JR64j9aODWfqDvEe/u6NU="},
{42, `FnIIPHayc1pHkY4Lh8+zhWwG8xk6Knk/D3cZU1/fOUmRAoJ6CeztvMOL`, "22RPOylMtdk7xO/QEQiMli4ql0k="},
{43, `J82VT7ND0Eg1MorSfJMUhn+qocF7PsUpdQAMrDiHJ2JcPZAHZ2nyuwjoKg==`, "pOR5eYfwCLRJbJsidpc1rIJYwtM="},
{44, `Zbu+78+e35ZIymV5KTDdub5McyI3FEO8fDxs62uWHQ9U3Oh3ZqgaZ30SnmQ=`, "DbvbTkgNTgWRqRidA9r1jhtUjro="},
{45, `lgybK3Da7LEeY5aeeNrqcdHvv6mD1W4cuQ3/rUj2C/CNcSI0cAMw6vtpVY3y`, "700RQByn1lRQSSme9npQB/Ye+bY="},
{46, `jStZgKHv4QyJLvF2bYbIUZi/FscHALfKHAssTXkrV1byVR9eACwW9DNZQRHQwg==`, "uwN55He8xgE4g93dH9163xPew4U="},
{47, `V1PSud3giF5WW72JB/bgtltsWtEB5V+a+wUALOJOGuqztzVXUZYrvoP3XV++gM0=`, "U+3ZfUF/6mwOoHJcSHkQkckfTDA="},
{48, `VXs4t4tfXGiWAL6dlhEMm0YQF0f2w9rzX0CvIVeuW56o6/ec2auMpKeU2VeteEK5`, "sq24lSf7wXLH8eigHl07X+qPTps="},
{49, `bLUn3jLH+HFUsG3ptWTHgNvtr3eEv9lfKBf0jm6uhpqhRwtbEQ7Ovj/hYQf42zfdtQ==`, "uC8xrnopGiHebGuwgq607WRQyxQ="},
{50, `4SVmjtXIL8BB8SfkbR5Cpaljm2jpyUfAhIBf65XmKxHlz9dy5XixgiE/q1lv+esZW/E=`, "wxZ0rxkMQEnRNAp8ZgEZLT4RdLM="},
{51, `pMljctlXeFUqbG3BppyiNbojQO3ygg6nZPeUZaQcVyJ+Clgiw3Q8ntLe8+02ZSfyCc39`, "aZEPmNvOXnTt7z7wt+ewV7QGMlg="},
{52, `C16uQlxsHxMWnV2gJhFPuJ2/guZ4N1YgmNvAwL1yrouGQtwieGx8WvZsmYRnX72JnbVtTw==`, "QtlSNqXhVij64MMhKJ3EsDFB/z8="},
{53, `7ZVDOywvrl3L0GyKjjcNg2CcTI81n2CeUbzdYWcZOSCEnA/xrNHpiK01HOcGh3BbxuS4S6g=`, "4NznNJc4nmXeApfiCFTq/H5LbHw="},
{54, `JXm2tTVqpYuuz2Cc+ZnPusUb8vccPGrzWK2oVwLLl/FjpFoxO9FxGlhnB08iu8Q/XQSdzHn+`, "IwE5+2pKNcK366I2k2BzZYPibSI="},
{55, `TiiU1mxzYBSGZuE+TX0l9USWBilQ7dEml5lLrzNPh75xmhjIK8SGqVAkvIMgAmcMB+raXdMPZg==`, "yECGHtgR128ScP4XlvF96eLbIBE="},
{56, `zz+Q4zi6wh0fCJUFU9yUOqEVxlIA93gybXHOtXIPwQQ44pW4fyh6BRgc1bOneRuSWp85hwlTJl8=`, "+3Ef4D6yuoC8J+rbFqU1cegverE="},
{57, `sa6SHK9z/G505bysK5KgRO2z2cTksDkLoFc7sv0tWBmf2G2mCiozf2Ce6EIO+W1fRsrrtn/eeOAV`, "xZg1CwMNAjN0AIXw2yh4+1N3oos="},
{58, `0qx0xdyTHhnKJ22IeTlAjRpWw6y2sOOWFP75XJ7cleGJQiV2kyrmQOST4DGHIL0qqA7sMOdzKyTV
iw==`, "bS0tRYPkP1Gfc+ZsBm9PMzPunG8="},
{59, `QuzaF0+5ooig6OLEWeibZUENl8EaiXAQvK9UjBEauMeuFFDCtNcGs25BDtJGGbX90gH4VZvCCDNC
q4s=`, "rggokuJq1OGNOfB6aDp2g4rdPgw="},
{60, `+wg2x23GZQmMLkdv9MeAdettIWDmyK6Wr+ba23XD+Pvvq1lIMn9QIQT4Z7QHJE3iC/ZMFgaId9VA
yY3d`, "ahQbTmOdiKUNdhYRHgv5/Ky+Y6k="},
{61, `y0ydRgreRQwP95vpNP92ioI+7wFiyldHRbr1SfoPNdbKGFA0lBREaBEGNhf9yixmfE+Azo2AuROx
b7Yc7g==`, "cJKFc0dXfiN4hMg1lcMf5E4gqvo="},
{62, `LxlVvGXSQlSubK8r0pGf9zf7s/3RHe75a2WlSXQf3gZFR/BtRnR7fCIcaG//CbGfodBFp06DBx/S
9hUV8Bk=`, "NwuwhhRWX8QZ/vhWKWgQ1+rNomI="},
{63, `L+LSB8kmGMnHaWVA5P/+qFnfQliXvgJW7d2JGAgT6+koi5NQujFW1bwQVoXrBVyob/gBxGizUoJM
gid5gGNo`, "ndX/KZBtFoeO3xKeo1ajO/Jy+rY="},
{64, `Mb7EGva2rEE5fENDL85P+BsapHEEjv2/siVhKjvAQe02feExVOQSkfmuYzU/kTF1MaKjPmKF/w+c
bvwfdWL8aQ==`, "n1anP5NfvD4XDYWIeRPW3ZkPv1Y="},
{111, `jyibxJSzO6ZiZ0O1qe3tG/bvIAYssvukh9suIT5wEy1JBINVgPiqdsTW0cOpP0aUfP7mgqLfADkz
I/m/GgCuVhr8oFLrOCoTx1/psBOWwhltCbhUx51Icm9aH8tY4Z3ccU+6BKpYQkLCy0B/A9Zc`, "hZfLIilSITC6N3e3tQ/iSgEzkto="},
{128, `ikwCorI7PKWz17EI50jZCGbV9JU2E8bXVfxNMg5zdmqSZ2NlsQPp0kqYIPjzwTg1MBtfWPg53k0h
0P2naJNEVgrqpoHTfV2b3pJ4m0zYPTJmUX4Bg/lOxcnCxAYKU29Y5F0U8Quz7ZXFBEweftXxJ7RS
4r6N7BzJrPsLhY7hgck=`, "imAoFvCWlDn4yVw3/oq1PDbbm6U="},
{222, `PfxMcUd0vIW6VbHG/uj/Y0W6qEoKmyBD0nYebEKazKaKG+UaDqBEcmQjbfQeVnVLuodMoPp7P7TR
1htX5n2VnkHh22xDyoJ8C/ZQKiSNqQfXvh83judf4RVr9exJCud8Uvgip6aVZTaPrJHVjQhMCp/d
EnGvqg0oN5OVkM2qqAXvA0teKUDhgNM71sDBVBCGXxNOR2bpbD1iM4dnuT0ey4L+loXEHTL0fqMe
UcEi2asgImnlNakwenDzz0x57aBwyq3AspCFGB1ncX4yYCr/OaCcS5OKi/00WH+wNQU3`, "QX/YEpG0gDsmhEpCdWhsxDzsfVE="},
{256, `qwGf2ESubE5jOUHHyc94ORczFYYbc2OmEzo+hBIyzJiNwAzC8PvJqtTzwkWkSslgHFGWQZR2BV5+
uYTrYT7HVwRM40vqfj0dBgeDENyTenIOL1LHkjtDKoXEnQ0mXAHoJ8PjbNC93zi5TovVRXTNzfGE
s5dpWVqxUzb5lc7dwkyvOluBw482mQ4xrzYyIY1t+//OrNi1ObGXuUw2jBQOFfJVj2Y6BOyYmfB1
y36eBxi3zxeG5d5NYjm2GSh6e08QMAwu3zrINcqIzLOuNIiGXBtl7DjKt7b5wqi4oFiRpZsCyx2s
mhSrdrtK/CkdU6nDN+34vSR/M8rZpWQdBE7a8g==`, "WYT9JY3JIo/pEBp+tIM6Gt2nyTM="},
{333, `w0LGhqU1WXFbdavqDE4kAjEzWLGGzmTNikzqnsiXHx2KRReKVTxkv27u3UcEz9+lbMvYl4xFf2Z4
aE1xRBBNd1Ke5C0zToSaYw5o4B/7X99nKK2/XaUX1byLow2aju2XJl2OpKpJg+tSJ2fmjIJTkfuY
Uz574dFX6/VXxSxwGH/xQEAKS5TCsBK3CwnuG1p5SAsQq3gGVozDWyjEBcWDMdy8/AIFrj/y03Lf
c/RNRCQTAfZbnf2QwV7sluw4fH3XJr07UoD0YqN+7XZzidtrwqMY26fpLZnyZjnBEt1FAZWO7RnK
G5asg8xRk9YaDdedXdQSJAOy6bWEWlABj+tVAigBxavaluUH8LOj+yfCFldJjNLdi90fVHkUD/m4
Mr5OtmupNMXPwuG3EQlqWUVpQoYpUYKLsk7a5Mvg6UFkiH596y5IbJEVCI1Kb3D1`, "e3+wo77iKcILiZegnzyUNcjCdoQ="},
}
func TestQuickXorHash(t *testing.T) {
for _, test := range testVectors {
what := fmt.Sprintf("test size %d", test.size)
in, err := base64.StdEncoding.DecodeString(test.in)
require.NoError(t, err, what)
got := Sum(in)
want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what)
assert.Equal(t, want, got[:], what)
}
}
func TestQuickXorHashByBlock(t *testing.T) {
for _, blockSize := range []int{1, 2, 4, 7, 8, 16, 32, 64, 128, 256, 512} {
for _, test := range testVectors {
what := fmt.Sprintf("test size %d blockSize %d", test.size, blockSize)
in, err := base64.StdEncoding.DecodeString(test.in)
require.NoError(t, err, what)
h := New()
for i := 0; i < len(in); i += blockSize {
end := i + blockSize
if end > len(in) {
end = len(in)
}
n, err := h.Write(in[i:end])
require.Equal(t, end-i, n, what)
require.NoError(t, err, what)
}
got := h.Sum(nil)
want, err := base64.StdEncoding.DecodeString(test.out)
require.NoError(t, err, what)
assert.Equal(t, want, got, what)
}
}
}
func TestSize(t *testing.T) {
d := New()
assert.Equal(t, 20, d.Size())
}
func TestBlockSize(t *testing.T) {
d := New()
assert.Equal(t, 64, d.BlockSize())
}
func TestReset(t *testing.T) {
d := New()
zeroHash := d.Sum(nil)
_, _ = d.Write([]byte{1})
assert.NotEqual(t, zeroHash, d.Sum(nil))
d.Reset()
assert.Equal(t, zeroHash, d.Sum(nil))
}
// check interface
var _ hash.Hash = (*quickXorHash)(nil)
onedriver-0.14.1/fs/graph/setup_test.go 0000664 0000000 0000000 00000001275 14513675524 0020052 0 ustar 00root root 0000000 0000000 package graph
import (
"os"
"testing"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
func TestMain(m *testing.M) {
os.Chdir("../..")
f, _ := os.OpenFile("fusefs_tests.log", os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644)
zerolog.SetGlobalLevel(zerolog.TraceLevel)
log.Logger = log.Output(zerolog.ConsoleWriter{Out: f, TimeFormat: "15:04:05"})
defer f.Close()
// auth and log account metadata so we're extra sure who we're testing against
auth := Authenticate(AuthConfig{}, ".auth_tokens.json", false)
user, _ := GetUser(auth)
drive, _ := GetDrive(auth)
log.Info().
Str("account", user.UserPrincipalName).
Str("type", drive.DriveType).
Msg("Starting tests")
os.Exit(m.Run())
}
onedriver-0.14.1/fs/inode.go 0000664 0000000 0000000 00000016011 14513675524 0015642 0 ustar 00root root 0000000 0000000 package fs
import (
"encoding/json"
"math/rand"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
)
// Inode represents a file or folder fetched from the Graph API. All struct
// fields are pointers so as to avoid including them when marshaling to JSON
// if not present. The embedded DriveItem's fields should never be accessed, they
// are there for JSON unmarshaling/marshaling only. (They are not safe to access
// concurrently.) This struct's methods are thread-safe and can be called
// concurrently. Reads/writes are done directly to DriveItems instead of
// implementing something like the fs.FileHandle to minimize the complexity of
// operations like Flush.
type Inode struct {
sync.RWMutex
graph.DriveItem
nodeID uint64 // filesystem node id
children []string // a slice of ids, nil when uninitialized
hasChanges bool // used to trigger an upload on flush
subdir uint32 // used purely by NLink()
mode uint32 // do not set manually
}
// SerializeableInode is like a Inode, but can be serialized for local storage
// to disk
type SerializeableInode struct {
graph.DriveItem
Children []string
Subdir uint32
Mode uint32
}
// NewInode initializes a new Inode
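//
// A sketch mirroring how the tests call it:
//
//	inode := NewInode("Test Create", 0644|fuse.S_IFREG, nil)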
func NewInode(name string, mode uint32, parent *Inode) *Inode {
itemParent := &graph.DriveItemParent{ID: "", Path: ""}
if parent != nil {
itemParent.Path = parent.Path()
parent.RLock()
itemParent.ID = parent.DriveItem.ID
itemParent.DriveID = parent.DriveItem.Parent.DriveID
itemParent.DriveType = parent.DriveItem.Parent.DriveType
parent.RUnlock()
}
currentTime := time.Now()
return &Inode{
DriveItem: graph.DriveItem{
ID: localID(),
Name: name,
Parent: itemParent,
ModTime: &currentTime,
},
children: make([]string, 0),
mode: mode,
}
}
// AsJSON converts a DriveItem to JSON for use with local storage. Not used with
// the API. FIXME: If implemented as MarshalJSON, this will break delta syncs
// for business accounts. Don't ask me why.
func (i *Inode) AsJSON() []byte {
i.RLock()
defer i.RUnlock()
data, _ := json.Marshal(SerializeableInode{
DriveItem: i.DriveItem,
Children: i.children,
Subdir: i.subdir,
Mode: i.mode,
})
return data
}
// NewInodeJSON converts JSON to a *DriveItem when loading from local storage. Not
// used with the API. FIXME: If implemented as UnmarshalJSON, this will break
// delta syncs for business accounts. Don't ask me why.
func NewInodeJSON(data []byte) (*Inode, error) {
var raw SerializeableInode
if err := json.Unmarshal(data, &raw); err != nil {
return nil, err
}
return &Inode{
DriveItem: raw.DriveItem,
children: raw.Children,
mode: raw.Mode,
subdir: raw.Subdir,
}, nil
}
// NewInodeDriveItem creates a new Inode from a DriveItem
func NewInodeDriveItem(item *graph.DriveItem) *Inode {
if item == nil {
return nil
}
return &Inode{
DriveItem: *item,
}
}
// String is only used for debugging by go-fuse
func (i *Inode) String() string {
return i.Name()
}
// Name is used to ensure thread-safe access to the NameInternal field.
func (i *Inode) Name() string {
i.RLock()
defer i.RUnlock()
return i.DriveItem.Name
}
// SetName sets the name of the item in a thread-safe manner.
func (i *Inode) SetName(name string) {
i.Lock()
i.DriveItem.Name = name
i.Unlock()
}
// NodeID returns the inodes ID in the filesystem
func (i *Inode) NodeID() uint64 {
i.RLock()
defer i.RUnlock()
return i.nodeID
}
// SetNodeID sets the inode ID for an inode if not already set. Does nothing if
// the Inode already has an ID.
func (i *Inode) SetNodeID(id uint64) uint64 {
i.Lock()
defer i.Unlock()
if i.nodeID == 0 {
i.nodeID = id
}
return i.nodeID
}
var charset = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
func randString(length int) string {
out := make([]byte, length)
for i := 0; i < length; i++ {
out[i] = charset[rand.Intn(len(charset))]
}
return string(out)
}
func localID() string {
return "local-" + randString(20)
}
func isLocalID(id string) bool {
return strings.HasPrefix(id, "local-") || id == ""
}
// ID returns the internal ID of the item
func (i *Inode) ID() string {
i.RLock()
defer i.RUnlock()
return i.DriveItem.ID
}
// ParentID returns the ID of this item's parent.
func (i *Inode) ParentID() string {
i.RLock()
defer i.RUnlock()
if i.DriveItem.Parent == nil {
return ""
}
return i.DriveItem.Parent.ID
}
// Path returns an inode's full path
func (i *Inode) Path() string {
// special case when it's the root item
name := i.Name()
if i.ParentID() == "" && name == "root" {
return "/"
}
// all paths come prefixed with "/drive/root:"
i.RLock()
defer i.RUnlock()
if i.DriveItem.Parent == nil {
return name
}
prepath := strings.TrimPrefix(i.DriveItem.Parent.Path+"/"+name, "/drive/root:")
return strings.Replace(prepath, "//", "/", -1)
}
// HasChanges returns true if the file has local changes that haven't been
// uploaded yet.
func (i *Inode) HasChanges() bool {
i.RLock()
defer i.RUnlock()
return i.hasChanges
}
// HasChildren returns true if the item has more than 0 children
func (i *Inode) HasChildren() bool {
i.RLock()
defer i.RUnlock()
return len(i.children) > 0
}
// makeAttr is a convenience function to create a set of filesystem attrs for
// use with syscalls that use or modify attrs.
func (i *Inode) makeAttr() fuse.Attr {
mtime := i.ModTime()
return fuse.Attr{
Ino: i.NodeID(),
Size: i.Size(),
Nlink: i.NLink(),
Ctime: mtime,
Mtime: mtime,
Atime: mtime,
Mode: i.Mode(),
// whatever user is running the filesystem is the owner
Owner: fuse.Owner{
Uid: uint32(os.Getuid()),
Gid: uint32(os.Getgid()),
},
}
}
// IsDir returns if it is a directory (true) or file (false).
func (i *Inode) IsDir() bool {
// 0 if the dir bit is not set
return i.Mode()&fuse.S_IFDIR > 0
}
// Mode returns the permissions/mode of the file.
func (i *Inode) Mode() uint32 {
i.RLock()
defer i.RUnlock()
if i.mode == 0 { // only 0 if fetched from Graph API
if i.DriveItem.IsDir() {
return fuse.S_IFDIR | 0755
}
return fuse.S_IFREG | 0644
}
return i.mode
}
// ModTime returns the Unix timestamp of last modification (to get a time.Time
// struct, use time.Unix(int64(d.ModTime()), 0))
func (i *Inode) ModTime() uint64 {
i.RLock()
defer i.RUnlock()
return i.DriveItem.ModTimeUnix()
}
// NLink gives the number of hard links to an inode (or child count if a
// directory)
func (i *Inode) NLink() uint32 {
if i.IsDir() {
i.RLock()
defer i.RUnlock()
// we precompute subdir due to mutex lock contention between NLink and
// other ops. subdir is modified by cache Insert/Delete and GetChildren.
return 2 + i.subdir
}
return 1
}
// Size pretends that folders are 4096 bytes, even though the API reports them
// as 0 (they do not exist as real directories on disk).
func (i *Inode) Size() uint64 {
if i.IsDir() {
return 4096
}
i.RLock()
defer i.RUnlock()
return i.DriveItem.Size
}
// Octal converts a number to its octal representation in string form.
func Octal(i uint32) string {
return strconv.FormatUint(uint64(i), 8)
}
onedriver-0.14.1/fs/inode_test.go 0000664 0000000 0000000 00000007723 14513675524 0016713 0 ustar 00root root 0000000 0000000 package fs
import (
"context"
"io/ioutil"
"path/filepath"
"testing"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// verify that items automatically get created with an ID beginning with "local-"
func TestConstructor(t *testing.T) {
t.Parallel()
inode := NewInode("Test Create", 0644|fuse.S_IFREG, nil)
if inode.ID() == "" || !isLocalID(inode.ID()) {
t.Fatalf("Expected an ID beginning with \"local-\", got \"%s\" instaed",
inode.ID())
}
}
// verify that the mode of items is set correctly when they are fetched from
// the server
func TestMode(t *testing.T) {
t.Parallel()
item, _ := graph.GetItemPath("/Documents", auth)
inode := NewInodeDriveItem(item)
if inode.Mode() != uint32(0755|fuse.S_IFDIR) {
t.Fatalf("mode of /Documents wrong: %o != %o",
inode.Mode(), 0755|fuse.S_IFDIR)
}
fname := "/onedriver_tests/test_mode.txt"
require.NoError(t, ioutil.WriteFile("mount"+fname, []byte("test"), 0644))
var err error
for i := 0; i < 10; i++ {
item, err = graph.GetItemPath(fname, auth)
if err == nil && item != nil {
break
}
time.Sleep(time.Second)
}
if item == nil {
t.Fatal("item cannot be nil, err:", err)
}
inode = NewInodeDriveItem(item)
if inode.Mode() != uint32(0644|fuse.S_IFREG) {
t.Fatalf("mode of file wrong: %o != %o",
inode.Mode(), 0644|fuse.S_IFREG)
}
}
// Do we properly detect whether something is a directory or not?
func TestIsDir(t *testing.T) {
t.Parallel()
item, _ := graph.GetItemPath("/Documents", auth)
inode := NewInodeDriveItem(item)
if !inode.IsDir() {
t.Fatal("/Documents not detected as a directory")
}
fname := "/onedriver_tests/test_is_dir.txt"
require.NoError(t, ioutil.WriteFile("mount"+fname, []byte("test"), 0644))
assert.Eventually(t, func() bool {
item, err := graph.GetItemPath(fname, auth)
if err == nil && item != nil {
if inode := NewInodeDriveItem(item); inode.IsDir() {
t.Fatal("File created with mode 644 not detected as file")
}
return true
}
return false
}, 10*time.Second, time.Second, "Could not create item.")
}
// A filename like .~lock.libreoffice-test.docx# will fail to upload unless the
// filename is escaped.
func TestFilenameEscape(t *testing.T) {
t.Parallel()
fname := `.~lock.libreoffice-test.docx#`
require.NoError(t, ioutil.WriteFile(filepath.Join(TestDir, fname), []byte("argl bargl"), 0644))
// make sure it made it to the server
assert.Eventually(t, func() bool {
children, err := graph.GetItemChildrenPath("/onedriver_tests", auth)
if err != nil {
// require.FailNow must not be called from Eventually's goroutine
return false
}
for _, child := range children {
if child.Name == fname {
return true
}
}
return false
}, retrySeconds, 5*time.Second, "Could not find file: %s", fname)
}
// When running creat() on an existing file, we should truncate the existing file and
// return the original inode.
// Related to: https://github.com/jstaf/onedriver/issues/99
func TestDoubleCreate(t *testing.T) {
t.Parallel()
fname := "double_create.txt"
parent, err := fs.GetPath("/onedriver_tests", auth)
require.NoError(t, err)
fs.Create(
context.Background().Done(),
&fuse.CreateIn{
InHeader: fuse.InHeader{NodeId: parent.NodeID()},
Mode: 0644,
},
fname,
&fuse.CreateOut{},
)
child, err := fs.GetChild(parent.ID(), fname, auth)
// we clean up after ourselves to prevent failing some of the offline tests
defer fs.Unlink(context.Background().Done(), &fuse.InHeader{NodeId: parent.NodeID()}, fname)
if err != nil || child == nil {
t.Fatal("Could not find child post-create")
}
childID := child.ID()
fs.Create(
context.Background().Done(),
&fuse.CreateIn{
InHeader: fuse.InHeader{NodeId: parent.NodeID()},
Mode: 0644,
},
fname,
&fuse.CreateOut{},
)
child, err = fs.GetChild(parent.ID(), fname, auth)
require.NoError(t, err)
if child == nil {
t.Fatal("Could not find child post-create")
}
assert.Equal(t, childID, child.ID(),
"IDs did not match when create run twice on same file.",
)
}
onedriver-0.14.1/fs/offline/ 0000775 0000000 0000000 00000000000 14513675524 0015640 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/fs/offline/offline_test.go 0000664 0000000 0000000 00000006274 14513675524 0020661 0 ustar 00root root 0000000 0000000 // This package exists purely for the convenience of easily running tests which
// test the offline functionality of the fs package.
// `unshare -nr` is used to deny network access, and then the tests are run using
// cached data from the tests in the fs package.
package offline
import (
"os"
"path/filepath"
"testing"
"github.com/jstaf/onedriver/fs"
"github.com/stretchr/testify/require"
)
// We should see more than zero items when we run ls.
func TestOfflineReaddir(t *testing.T) {
t.Parallel()
files, err := os.ReadDir(TestDir)
if err != nil {
t.Fatal(err)
}
if len(files) == 0 {
t.Fatal("Expected more than 0 files in the test directory.")
}
}
// We should find the file named bagels (from TestEchoWritesToFile)
func TestOfflineBagelDetection(t *testing.T) {
t.Parallel()
files, err := os.ReadDir(TestDir)
if err != nil {
t.Fatal(err)
}
found := false
allFiles := make([]string, 0)
for _, f := range files {
allFiles = append(allFiles, f.Name())
if f.Name() == "bagels" {
found = true
if f.IsDir() {
t.Fatal("\"bagels\" should be an ordinary file, not a directory")
}
info, _ := f.Info()
octal := fs.Octal(uint32(info.Mode().Perm()))
if octal[0] != '6' || octal[1] < '4' || octal[2] != '4' {
// the middle digit just needs to be at least 4
// for compatibility with 022 / 002 umasks on different distros
t.Fatalf("\"bagels\" permissions bits wrong, got %s, expected 644", octal)
}
break
}
}
if !found {
t.Error("\"bagels\" not found! Expected file not present.")
t.Errorf("Got: %+v", allFiles)
}
}
// Does the contents of the bagels file match what it should?
// (File contents generated by TestEchoWritesToFile in previous tests.)
func TestOfflineBagelContents(t *testing.T) {
t.Parallel()
contents, err := os.ReadFile(filepath.Join(TestDir, "bagels"))
require.NoError(t, err)
require.Equal(t, []byte("bagels\n"), contents, "Offline file contents did not match.")
}
// Creating a file offline should fail.
func TestOfflineFileCreation(t *testing.T) {
t.Parallel()
require.Error(t,
os.WriteFile(filepath.Join(TestDir, "donuts"), []byte("fail me"), 0644),
"Writing a file while offline should fail.",
)
}
// Modifying a file offline should fail.
func TestOfflineFileModification(t *testing.T) {
t.Parallel()
require.Error(t,
os.WriteFile(filepath.Join(TestDir, "bagels"), []byte("fail me too"), 0644),
"Modifying a file while offline should fail.",
)
}
// Deleting a file offline should fail.
func TestOfflineFileDeletion(t *testing.T) {
t.Parallel()
if os.Remove(filepath.Join(TestDir, "write.txt")) == nil {
t.Error("Deleting a file while offline should fail.")
}
if os.Remove(filepath.Join(TestDir, "empty")) == nil {
t.Error("Deleting an empty file while offline should fail.")
}
}
// Creating a directory offline should fail.
func TestOfflineMkdir(t *testing.T) {
t.Parallel()
if os.Mkdir(filepath.Join(TestDir, "offline_dir"), 0755) == nil {
t.Fatal("Creating a directory should have failed offline.")
}
}
// Deleting a directory offline should fail.
func TestOfflineRmdir(t *testing.T) {
t.Parallel()
if os.Remove(filepath.Join(TestDir, "folder1")) == nil {
t.Fatal("Removing a directory should have failed offline.")
}
}
onedriver-0.14.1/fs/offline/setup_test.go 0000664 0000000 0000000 00000004441 14513675524 0020371 0 ustar 00root root 0000000 0000000 package offline
import (
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
const (
mountLoc = "mount"
testDBLoc = "tmp"
TestDir = mountLoc + "/onedriver_tests"
)
var auth *graph.Auth
// Like the fs package's TestMain, but designed for running tests offline.
func TestMain(m *testing.M) {
if wd, _ := os.Getwd(); strings.HasSuffix(wd, "/offline") {
// depending on how this test gets launched, the working directory can be wrong
os.Chdir("../..")
}
// attempt to unmount regardless of what happens (in case previous tests
// failed and didn't clean themselves up)
exec.Command("fusermount", "-uz", mountLoc).Run()
os.Mkdir(mountLoc, 0755)
auth = graph.Authenticate(graph.AuthConfig{}, ".auth_tokens.json", false)
inode, err := graph.GetItem("root", auth)
if inode != nil || !graph.IsOffline(err) {
fmt.Println("These tests must be run offline.")
os.Exit(1)
}
f, _ := os.OpenFile("fusefs_tests.log", os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644)
zerolog.SetGlobalLevel(zerolog.TraceLevel)
log.Logger = log.Output(zerolog.ConsoleWriter{Out: f, TimeFormat: "15:04:05"})
defer f.Close()
log.Info().Msg("Setup offline tests ------------------------------")
// reuses the cached data from the previous tests
server, _ := fuse.NewServer(
fs.NewFilesystem(auth, filepath.Join(testDBLoc, "test")),
mountLoc,
&fuse.MountOptions{
Name: "onedriver",
FsName: "onedriver",
DisableXAttrs: true,
MaxBackground: 1024,
},
)
// setup sigint handler for graceful unmount on interrupt/terminate
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGABRT)
go fs.UnmountHandler(sigChan, server)
// mount fs in background thread
go server.Serve()
log.Info().Msg("Start offline tests ------------------------------")
code := m.Run()
log.Info().Msg("Finish offline tests ------------------------------")
if server.Unmount() != nil {
log.Error().Msg("Failed to unmount test fuse server, attempting lazy unmount")
exec.Command("fusermount", "-zu", "mount").Run()
}
fmt.Println("Successfully unmounted fuse server!")
os.Exit(code)
}
onedriver-0.14.1/fs/setup_test.go 0000664 0000000 0000000 00000007676 14513675524 0016764 0 ustar 00root root 0000000 0000000 package fs
import (
"fmt"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"syscall"
"testing"
"time"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
const (
mountLoc = "mount"
testDBLoc = "tmp"
TestDir = mountLoc + "/onedriver_tests"
DeltaDir = TestDir + "/delta"
retrySeconds = 60 * time.Second //lint:ignore ST1011 the unit suffix in this name is intentional
)
var (
auth *graph.Auth
fs *Filesystem
)
// Tests are done in the main project directory with a mounted filesystem to
// avoid having to repeatedly recreate auth_tokens.json and juggle multiple auth
// sessions.
func TestMain(m *testing.M) {
// determine if we're running a single test in vscode or something
var singleTest bool
for _, arg := range os.Args {
if strings.Contains(arg, "-test.run") {
singleTest = true
}
}
os.Chdir("..")
// attempt to unmount regardless of what happens (in case previous tests
// failed and didn't clean themselves up)
exec.Command("fusermount", "-uz", mountLoc).Run()
os.Mkdir(mountLoc, 0755)
// wipe all cached data from previous tests
os.RemoveAll(testDBLoc)
os.Mkdir(testDBLoc, 0755)
f, _ := os.OpenFile("fusefs_tests.log", os.O_TRUNC|os.O_CREATE|os.O_RDWR, 0644)
zerolog.SetGlobalLevel(zerolog.TraceLevel)
log.Logger = log.Output(zerolog.ConsoleWriter{Out: f, TimeFormat: "15:04:05"})
defer f.Close()
auth = graph.Authenticate(graph.AuthConfig{}, ".auth_tokens.json", false)
fs = NewFilesystem(auth, filepath.Join(testDBLoc, "test"))
server, _ := fuse.NewServer(
fs,
mountLoc,
&fuse.MountOptions{
Name: "onedriver",
FsName: "onedriver",
DisableXAttrs: true,
MaxBackground: 1024,
},
)
// setup sigint handler for graceful unmount on interrupt/terminate
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGABRT)
go UnmountHandler(sigChan, server)
// mount fs in background thread
go server.Serve()
// cleanup from last run
log.Info().Msg("Setup test environment ---------------------------------")
if err := os.RemoveAll(TestDir); err != nil {
fmt.Println(err)
os.Exit(1)
}
os.Mkdir(TestDir, 0755)
os.Mkdir(DeltaDir, 0755)
// create paging test files before the delta thread is created
if !singleTest {
os.Mkdir(filepath.Join(TestDir, "paging"), 0755)
createPagingTestFiles()
}
go fs.DeltaLoop(5 * time.Second)
// not created by default on onedrive for business
os.Mkdir(mountLoc+"/Documents", 0755)
// we do not cd into the mounted directory or it will hang indefinitely on
// unmount with "device or resource busy"
log.Info().Msg("Test session start ---------------------------------")
// run tests
code := m.Run()
log.Info().Msg("Test session end -----------------------------------")
fmt.Printf("Waiting 5 seconds for any remaining uploads to complete")
for i := 0; i < 5; i++ {
time.Sleep(time.Second)
fmt.Printf(".")
}
fmt.Printf("\n")
// unmount
if server.Unmount() != nil {
log.Error().Msg("Failed to unmount test fuse server, attempting lazy unmount")
exec.Command("fusermount", "-zu", "mount").Run()
}
fmt.Println("Successfully unmounted fuse server!")
os.Exit(code)
}
// Apparently 200 items is the default paging limit.
// Upload at least this many for a later test before the delta thread is created.
func createPagingTestFiles() {
fmt.Println("Setting up paging test files.")
var group sync.WaitGroup
var errCounter int64
for i := 0; i < 250; i++ {
group.Add(1)
go func(n int, wg *sync.WaitGroup) {
_, err := graph.Put(
graph.ResourcePath(fmt.Sprintf("/onedriver_tests/paging/%d.txt", n))+":/content",
auth,
strings.NewReader("test\n"),
)
if err != nil {
log.Error().Err(err).Msg("Paging upload fail.")
atomic.AddInt64(&errCounter, 1)
}
wg.Done()
}(i, &group)
}
group.Wait()
log.Info().Msgf("%d failed paging uploads.\n", errCounter)
fmt.Println("Finished with paging test setup.")
}
onedriver-0.14.1/fs/signal_handlers.go 0000664 0000000 0000000 00000001141 14513675524 0017677 0 ustar 00root root 0000000 0000000 package fs
import (
"os"
"strings"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rs/zerolog/log"
)
// UnmountHandler should be used as a goroutine that will handle SIGINT and then exit gracefully
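//
// A minimal usage sketch, mirroring what the test setup does:
//
//	sigChan := make(chan os.Signal, 1)
//	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGABRT)
//	go UnmountHandler(sigChan, server)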
func UnmountHandler(signal <-chan os.Signal, server *fuse.Server) {
sig := <-signal // block until signal
log.Info().Str("signal", strings.ToUpper(sig.String())).
Msg("Signal received, unmounting filesystem.")
err := server.Unmount()
if err != nil {
log.Error().Err(err).Msg("Failed to unmount filesystem cleanly! " +
"Run \"fusermount -uz /MOUNTPOINT/GOES/HERE\" to unmount.")
}
os.Exit(128)
}
onedriver-0.14.1/fs/upload_manager.go 0000664 0000000 0000000 00000012662 14513675524 0017532 0 ustar 00root root 0000000 0000000 package fs
import (
"encoding/json"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog/log"
bolt "go.etcd.io/bbolt"
)
const maxUploadsInFlight = 5
var bucketUploads = []byte("uploads")
// UploadManager is used to manage and retry uploads.
type UploadManager struct {
queue chan *UploadSession
deletionQueue chan string
sessions map[string]*UploadSession
inFlight uint8 // number of sessions in flight
auth *graph.Auth
fs *Filesystem
db *bolt.DB
}
// NewUploadManager creates a new queue/thread for uploads
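//
// A minimal usage sketch (the tick duration here is illustrative):
//
//	uploads := NewUploadManager(5*time.Second, db, fs, auth)
//	uploads.QueueUpload(inode)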
func NewUploadManager(duration time.Duration, db *bolt.DB, fs *Filesystem, auth *graph.Auth) *UploadManager {
manager := UploadManager{
queue: make(chan *UploadSession),
deletionQueue: make(chan string, 1000), // FIXME - why does this chan need to be buffered now???
sessions: make(map[string]*UploadSession),
auth: auth,
db: db,
fs: fs,
}
db.View(func(tx *bolt.Tx) error {
// Add any incomplete sessions from disk - any sessions here were never
// finished. The most likely cause of this is that the user shut off
// their computer or closed the program after starting the upload.
b := tx.Bucket(bucketUploads)
if b == nil {
// bucket does not exist yet, bail out early
return nil
}
return b.ForEach(func(key []byte, val []byte) error {
session := &UploadSession{}
err := json.Unmarshal(val, session)
if err != nil {
log.Error().Err(err).Msg("Failure restoring upload sessions from disk.")
return err
}
if session.getState() != uploadNotStarted {
manager.inFlight++
}
session.cancel(auth) // uploads are currently non-resumable
manager.sessions[session.ID] = session
return nil
})
})
go manager.uploadLoop(duration)
return &manager
}
// uploadLoop manages the deduplication and tracking of uploads
func (u *UploadManager) uploadLoop(duration time.Duration) {
ticker := time.NewTicker(duration)
for {
select {
case session := <-u.queue: // new sessions
// deduplicate sessions for the same item
if old, exists := u.sessions[session.ID]; exists {
old.cancel(u.auth)
}
contents, _ := json.Marshal(session)
u.db.Batch(func(tx *bolt.Tx) error {
// persist to disk in case the user shuts off their computer or
// kills onedriver prematurely
b, _ := tx.CreateBucketIfNotExists(bucketUploads)
return b.Put([]byte(session.ID), contents)
})
u.sessions[session.ID] = session
case cancelID := <-u.deletionQueue: // remove uploads for deleted items
u.finishUpload(cancelID)
case <-ticker.C: // periodically start uploads, or remove them if done/failed
for _, session := range u.sessions {
switch session.getState() {
case uploadNotStarted:
// max active upload sessions are capped at this limit for faster
// uploads of individual files and also to prevent possible server-
// side throttling that can cause errors.
if u.inFlight < maxUploadsInFlight {
u.inFlight++
go session.Upload(u.auth)
}
case uploadErrored:
session.retries++
if session.retries > 5 {
log.Error().
Str("id", session.ID).
Str("name", session.Name).
Err(session).
Int("retries", session.retries).
Msg("Upload session failed too many times, cancelling session.")
u.finishUpload(session.ID)
// the session has been cancelled - don't fall through to the retry logic
break
}
log.Warn().
Str("id", session.ID).
Str("name", session.Name).
Err(session).
Msg("Upload session failed, will retry from beginning.")
session.cancel(u.auth) // cancel large sessions
session.setState(uploadNotStarted, nil)
case uploadComplete:
log.Info().
Str("id", session.ID).
Str("oldID", session.OldID).
Str("name", session.Name).
Msg("Upload completed!")
// ID changed during upload, move to new ID
if session.OldID != session.ID {
err := u.fs.MoveID(session.OldID, session.ID)
if err != nil {
log.Error().
Str("id", session.ID).
Str("oldID", session.OldID).
Str("name", session.Name).
Err(err).
Msg("Could not move inode to new ID!")
}
}
// inode will exist at the new ID now, but we check if inode
// is nil to see if the item has been deleted since upload start
if inode := u.fs.GetID(session.ID); inode != nil {
inode.Lock()
inode.DriveItem.ETag = session.ETag
inode.Unlock()
}
// the old ID is the one that was used to add it to the queue;
// clean up the session.
u.finishUpload(session.OldID)
}
}
}
}
}
// QueueUpload queues an item for upload.
func (u *UploadManager) QueueUpload(inode *Inode) error {
data := u.fs.getInodeContent(inode)
session, err := NewUploadSession(inode, data)
if err == nil {
u.queue <- session
}
return err
}
// CancelUpload is used to kill any pending uploads for a session
func (u *UploadManager) CancelUpload(id string) {
u.deletionQueue <- id
}
// finishUpload is an internal method that gets called when a session is
// completed. It cancels the session if one was in progress, and then deletes
// it from both memory and disk.
func (u *UploadManager) finishUpload(id string) {
if session, exists := u.sessions[id]; exists {
session.cancel(u.auth)
}
u.db.Batch(func(tx *bolt.Tx) error {
if b := tx.Bucket(bucketUploads); b != nil {
b.Delete([]byte(id))
}
return nil
})
if u.inFlight > 0 {
u.inFlight--
}
delete(u.sessions, id)
}
onedriver-0.14.1/fs/upload_manager_test.go 0000664 0000000 0000000 00000007106 14513675524 0020566 0 ustar 00root root 0000000 0000000 package fs
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
bolt "go.etcd.io/bbolt"
)
// Test that new uploads are written to disk to support resuming them later if
// the user shuts down their computer.
func TestUploadDiskSerialization(t *testing.T) {
t.Parallel()
// write a file and get its id - we do this as a goroutine because uploads are
// blocking now
go exec.Command("cp", "dmel.fa", filepath.Join(TestDir, "upload_to_disk.fa")).Run()
time.Sleep(time.Second)
inode, err := fs.GetPath("/onedriver_tests/upload_to_disk.fa", nil)
require.NoError(t, err)
// we can find the in-progress upload because there is a several second
// delay on new uploads
session := UploadSession{}
err = fs.db.Batch(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucketIfNotExists(bucketUploads)
diskSession := b.Get([]byte(inode.ID()))
if diskSession == nil {
return errors.New("item to upload not found on disk")
}
return json.Unmarshal(diskSession, &session)
})
if err != nil {
t.Log(err)
t.Log("This test sucks and should be rewritten to be less race-y!")
t.SkipNow()
}
// kill the session before it gets uploaded
fs.uploads.CancelUpload(session.ID)
// confirm that the file didn't get uploaded yet (just in case!)
driveItem, err := graph.GetItemPath("/onedriver_tests/upload_to_disk.fa", auth)
if err == nil && driveItem != nil {
if driveItem.Size > 0 {
t.Fatal("This test should be rewritten, the file was uploaded before " +
"the upload could be canceled.")
}
}
// now we create a new UploadManager from scratch, with the file injected
// into its db and confirm that the file gets uploaded
db, err := bolt.Open(filepath.Join(testDBLoc, "test_upload_disk_serialization.db"), 0644, nil)
require.NoError(t, err)
db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucket(bucketUploads)
payload, _ := json.Marshal(&session)
return b.Put([]byte(session.ID), payload)
})
NewUploadManager(time.Second, db, fs, auth)
assert.Eventually(t, func() bool {
driveItem, err = graph.GetItemPath("/onedriver_tests/upload_to_disk.fa", auth)
if driveItem != nil && err == nil {
return true
}
return false
}, 100*time.Second, 5*time.Second,
"Could not find uploaded file after unserializing from disk and resuming upload.",
)
}
// Make sure that uploading the same file multiple times works exactly as it should.
func TestRepeatedUploads(t *testing.T) {
t.Parallel()
// test setup
fname := filepath.Join(TestDir, "repeated_upload.txt")
require.NoError(t, ioutil.WriteFile(fname, []byte("initial content"), 0644))
var inode *Inode
require.Eventually(t, func() bool {
inode, _ = fs.GetPath("/onedriver_tests/repeated_upload.txt", auth)
return inode != nil && !isLocalID(inode.ID())
}, retrySeconds, 2*time.Second, "ID was local after upload.")
for i := 0; i < 5; i++ {
uploadme := []byte(fmt.Sprintf("iteration: %d", i))
require.NoError(t, ioutil.WriteFile(fname, uploadme, 0644))
time.Sleep(5 * time.Second)
item, err := graph.GetItemPath("/onedriver_tests/repeated_upload.txt", auth)
require.NoError(t, err)
content, _, err := graph.GetItemContent(item.ID, auth)
require.NoError(t, err)
if !bytes.Equal(content, uploadme) {
// wait and retry once
time.Sleep(5 * time.Second)
content, _, err := graph.GetItemContent(item.ID, auth)
require.NoError(t, err)
if !bytes.Equal(content, uploadme) {
t.Fatalf("Upload failed - got \"%s\", wanted \"%s\"", content, uploadme)
}
}
}
}
onedriver-0.14.1/fs/upload_session.go 0000664 0000000 0000000 00000025251 14513675524 0017601 0 ustar 00root root 0000000 0000000 package fs
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/rs/zerolog/log"
)
const (
// 10MB is the recommended upload size according to the graph API docs
uploadChunkSize uint64 = 10 * 1024 * 1024
// uploads larger than 4MB must use a formal upload session
uploadLargeSize uint64 = 4 * 1024 * 1024
)
// upload states
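// Sessions normally move from uploadNotStarted to uploadStarted to
// uploadComplete; sessions that hit uploadErrored are reset to
// uploadNotStarted by the upload loop and retried up to 5 times.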
const (
uploadNotStarted = iota
uploadStarted
uploadComplete
uploadErrored
)
// UploadSession contains a snapshot of the file we're uploading. We have to
// take the snapshot or the file may have changed on disk during upload (which
// would break the upload). It is not recommended to directly deserialize into
// this structure from API responses in case Microsoft ever adds a size, data,
// or modTime field to the response.
type UploadSession struct {
ID string `json:"id"`
OldID string `json:"oldID"`
ParentID string `json:"parentID"`
NodeID uint64 `json:"nodeID"`
Name string `json:"name"`
ExpirationDateTime time.Time `json:"expirationDateTime"`
Size uint64 `json:"size,omitempty"`
Data []byte `json:"data,omitempty"`
QuickXORHash string `json:"quickxorhash,omitempty"`
ModTime time.Time `json:"modTime,omitempty"`
retries int
sync.Mutex
UploadURL string `json:"uploadUrl"`
ETag string `json:"eTag,omitempty"`
state int
error // embedded error tracks errors that killed an upload
}
// MarshalJSON implements a custom JSON marshaler to avoid race conditions
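// The conversion to SerializeableUploadSession below drops this MarshalJSON
// method (defined types do not inherit methods), so json.Marshal does not
// recurse back into this function while the lock is held.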
func (u *UploadSession) MarshalJSON() ([]byte, error) {
u.Lock()
defer u.Unlock()
type SerializeableUploadSession UploadSession
return json.Marshal((*SerializeableUploadSession)(u))
}
// UploadSessionPost is the initial post used to create an upload session
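// When marshaled, the request body looks something like (timestamp
// illustrative):
//
//	{"@microsoft.graph.conflictBehavior":"replace","fileSystemInfo":{"lastModifiedDateTime":"2023-07-14T01:00:00Z"}}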
type UploadSessionPost struct {
Name string `json:"name,omitempty"`
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior,omitempty"`
FileSystemInfo `json:"fileSystemInfo,omitempty"`
}
// FileSystemInfo carries the filesystem metadata like Mtime/Atime
type FileSystemInfo struct {
LastModifiedDateTime time.Time `json:"lastModifiedDateTime,omitempty"`
}
func (u *UploadSession) getState() int {
u.Lock()
defer u.Unlock()
return u.state
}
// setState is just a helper method to set the UploadSession state and make error checking
// a little more straightforward.
func (u *UploadSession) setState(state int, err error) error {
u.Lock()
u.state = state
u.error = err
u.Unlock()
return err
}
// NewUploadSession wraps an upload of a file into an UploadSession struct
// responsible for performing uploads for a file.
func NewUploadSession(inode *Inode, data *[]byte) (*UploadSession, error) {
if data == nil {
return nil, errors.New("data to upload cannot be nil")
}
// create a generic session for all files
inode.RLock()
session := UploadSession{
ID: inode.DriveItem.ID,
OldID: inode.DriveItem.ID,
ParentID: inode.DriveItem.Parent.ID,
NodeID: inode.nodeID,
Name: inode.DriveItem.Name,
Data: *data,
ModTime: *inode.DriveItem.ModTime,
}
inode.RUnlock()
session.Size = uint64(len(*data)) // just in case it somehow differs
session.QuickXORHash = graph.QuickXORHash(data)
return &session, nil
}
// cancel the upload session by deleting the temp file at the endpoint.
func (u *UploadSession) cancel(auth *graph.Auth) {
u.Lock()
// small upload sessions will also have an empty UploadURL in addition to
// uninitialized large file uploads.
nonemptyURL := u.UploadURL != ""
u.Unlock()
if nonemptyURL {
state := u.getState()
if state == uploadStarted || state == uploadErrored {
// don't care about result, this is purely us being polite to the server
go graph.Delete(u.UploadURL, auth)
}
}
}
// Internal method used for uploading individual chunks of a DriveItem. We have
// to make things this way because the internal Put func doesn't work all that
// well when we need to add custom headers. Will return without an error
// irrespective of HTTP status (errors are reserved for failures that prevented
// the HTTP request from being made at all).
func (u *UploadSession) uploadChunk(auth *graph.Auth, offset uint64) ([]byte, int, error) {
u.Lock()
url := u.UploadURL
if url == "" {
u.Unlock()
return nil, -1, errors.New("UploadSession UploadURL cannot be empty")
}
u.Unlock()
// how much of the file are we going to upload?
if offset > u.Size {
return nil, -1, errors.New("offset cannot be larger than DriveItem size")
}
end := offset + uploadChunkSize
if end > u.Size {
end = u.Size
}
// the number of bytes in this chunk (the final chunk may be short)
reqChunkSize := end - offset
auth.Refresh()
client := &http.Client{}
request, _ := http.NewRequest(
"PUT",
url,
bytes.NewReader((u.Data)[offset:end]),
)
// no Authorization header - it will throw a 401 if present
request.Header.Add("Content-Length", strconv.Itoa(int(reqChunkSize)))
frags := fmt.Sprintf("bytes %d-%d/%d", offset, end-1, u.Size)
log.Info().Str("id", u.ID).Msg("Uploading " + frags)
request.Header.Add("Content-Range", frags)
resp, err := client.Do(request)
if err != nil {
// this is a serious error, not simply one with a non-200 return code
return nil, -1, err
}
defer resp.Body.Close()
response, _ := ioutil.ReadAll(resp.Body)
return response, resp.StatusCode, nil
}
// Upload copies the file's contents to the server. Should only be called as a
// goroutine, or it can potentially block for a very long time. The uploadSession.error
// field contains errors to be handled if called as a goroutine.
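// Files smaller than uploadLargeSize (4MB) go up via a single PUT to the
// content endpoint; anything larger uses createUploadSession and is sent in
// uploadChunkSize (10MB) pieces.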
func (u *UploadSession) Upload(auth *graph.Auth) error {
log.Info().Str("id", u.ID).Str("name", u.Name).Msg("Uploading file.")
u.setState(uploadStarted, nil)
var uploadPath string
var resp []byte
if u.Size < uploadLargeSize {
// Small upload sessions use a simple PUT request, but this does not support
// adding file modification times. We don't really care though, because
// after some experimentation, the Microsoft API doesn't seem to properly
// support these either (this is why we have to use etags).
if isLocalID(u.ID) {
uploadPath = fmt.Sprintf(
"/me/drive/items/%s:/%s:/content",
url.PathEscape(u.ParentID),
url.PathEscape(u.Name),
)
} else {
uploadPath = fmt.Sprintf(
"/me/drive/items/%s/content",
url.PathEscape(u.ID),
)
}
// small files handled in this block
var err error
resp, err = graph.Put(uploadPath, auth, bytes.NewReader(u.Data))
if err != nil && strings.Contains(err.Error(), "resourceModified") {
// retry the request after a second, likely the server is having issues
time.Sleep(time.Second)
resp, err = graph.Put(uploadPath, auth, bytes.NewReader(u.Data))
}
if err != nil {
return u.setState(uploadErrored, fmt.Errorf("small upload failed: %w", err))
}
} else {
if isLocalID(u.ID) {
uploadPath = fmt.Sprintf(
"/me/drive/items/%s:/%s:/createUploadSession",
url.PathEscape(u.ParentID),
url.PathEscape(u.Name),
)
} else {
uploadPath = fmt.Sprintf(
"/me/drive/items/%s/createUploadSession",
url.PathEscape(u.ID),
)
}
sessionPostData, _ := json.Marshal(UploadSessionPost{
ConflictBehavior: "replace",
FileSystemInfo: FileSystemInfo{
LastModifiedDateTime: u.ModTime,
},
})
resp, err := graph.Post(uploadPath, auth, bytes.NewReader(sessionPostData))
if err != nil {
return u.setState(uploadErrored, fmt.Errorf("failed to create upload session: %w", err))
}
// populate UploadURL/expiration - we unmarshal into a fresh session here
// just in case the API does something silly at a later date and overwrites
// a field it shouldn't.
tmp := UploadSession{}
if err = json.Unmarshal(resp, &tmp); err != nil {
return u.setState(uploadErrored,
fmt.Errorf("could not unmarshal upload session post response: %w", err))
}
u.Lock()
u.UploadURL = tmp.UploadURL
u.ExpirationDateTime = tmp.ExpirationDateTime
u.Unlock()
// api upload session created successfully, now do actual content upload
var status int
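// e.g. a 25MB file is uploaded as 3 chunks: 10MB, 10MB, and 5MB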
nchunks := int(math.Ceil(float64(u.Size) / float64(uploadChunkSize)))
for i := 0; i < nchunks; i++ {
resp, status, err = u.uploadChunk(auth, uint64(i)*uploadChunkSize)
if err != nil {
return u.setState(uploadErrored, fmt.Errorf("failed to perform chunk upload: %w", err))
}
// retry server-side failures with an exponential back-off strategy. Will not
// exit this loop unless it receives a non 5xx error or serious failure
for backoff := 1; status >= 500; backoff *= 2 {
log.Error().
Str("id", u.ID).
Str("name", u.Name).
Int("chunk", i).
Int("nchunks", nchunks).
Int("status", status).
Msgf("The OneDrive server is having issues, retrying chunk upload in %ds.", backoff)
time.Sleep(time.Duration(backoff) * time.Second)
resp, status, err = u.uploadChunk(auth, uint64(i)*uploadChunkSize)
if err != nil { // a serious, non 4xx/5xx error
return u.setState(uploadErrored, fmt.Errorf("failed to perform chunk upload: %w", err))
}
}
// handle client-side errors
if status >= 400 {
return u.setState(uploadErrored, fmt.Errorf("error uploading chunk - HTTP %d: %s", status, string(resp)))
}
}
}
// server has indicated that the upload was successful - now we check to verify the
// checksum is what it's supposed to be.
remote := graph.DriveItem{}
if err := json.Unmarshal(resp, &remote); err != nil {
if len(resp) == 0 {
// the API frequently just returns a 0-byte response for completed
// multipart uploads, so we manually fetch the newly updated item
var remotePtr *graph.DriveItem
if isLocalID(u.ID) {
remotePtr, err = graph.GetItemChild(u.ParentID, u.Name, auth)
} else {
remotePtr, err = graph.GetItem(u.ID, auth)
}
if err == nil {
remote = *remotePtr
} else {
return u.setState(uploadErrored,
fmt.Errorf("failed to get item post-upload: %w", err))
}
} else {
return u.setState(uploadErrored,
fmt.Errorf("could not unmarshal response: %w: %s", err, string(resp)),
)
}
}
if remote.File == nil && remote.Size != u.Size {
// if we are absolutely pounding the microsoft API, a remote item may sometimes
// come back without checksums, so we check the size of the uploaded item instead.
return u.setState(uploadErrored, errors.New("size mismatch when remote checksums did not exist"))
} else if !remote.VerifyChecksum(u.QuickXORHash) {
return u.setState(uploadErrored, errors.New("remote checksum did not match"))
}
// update the UploadSession's ID in the event that we exchange a local for a remote ID
u.Lock()
u.ID = remote.ID
u.ETag = remote.ETag
u.Unlock()
return u.setState(uploadComplete, nil)
}
onedriver-0.14.1/fs/upload_session_test.go 0000664 0000000 0000000 00000011442 14513675524 0020635 0 ustar 00root root 0000000 0000000 package fs
import (
"bytes"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/jstaf/onedriver/fs/graph"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestUploadSession verifies that the basic functionality of uploads works correctly.
func TestUploadSession(t *testing.T) {
t.Parallel()
testDir, err := fs.GetPath("/onedriver_tests", auth)
require.NoError(t, err)
inode := NewInode("uploadSessionSmall.txt", 0644, testDir)
data := []byte("our super special data")
inode.setContent(fs, data)
mtime := inode.ModTime()
session, err := NewUploadSession(inode, &data)
require.NoError(t, err)
err = session.Upload(auth)
require.NoError(t, err)
if isLocalID(session.ID) {
t.Fatalf("The session's ID was somehow still local following an upload: %s\n",
session.ID)
}
if sessionMtime := uint64(session.ModTime.Unix()); sessionMtime != mtime {
t.Errorf("session modtime changed - before: %d - after: %d", mtime, sessionMtime)
}
resp, _, err := graph.GetItemContent(session.ID, auth)
require.NoError(t, err)
if !bytes.Equal(data, resp) {
t.Fatalf("Data mismatch. Original content: %s\nRemote content: %s\n", data, resp)
}
// item now has a new id following the upload. We just change the ID here
// because that's part of the UploadManager functionality and gets tested elsewhere.
inode.DriveItem.ID = session.ID
// we overwrite and upload again to test uploading with the new remote id
newData := []byte("new data is extra long so it covers the old one completely")
inode.setContent(fs, newData)
session2, err := NewUploadSession(inode, &newData)
require.NoError(t, err)
err = session2.Upload(auth)
require.NoError(t, err)
resp, _, err = graph.GetItemContent(session.ID, auth)
require.NoError(t, err)
if !bytes.Equal(newData, resp) {
t.Fatalf("Data mismatch. Original content: %s\nRemote content: %s\n", newData, resp)
}
}
// TestUploadSessionSmallFS is the same test as TestUploadSession, but uses
// the filesystem itself to perform the uploads instead of testing the internal
// upload functions directly.
func TestUploadSessionSmallFS(t *testing.T) {
t.Parallel()
data := []byte("super special data for upload test 2")
err := ioutil.WriteFile(filepath.Join(TestDir, "uploadSessionSmallFS.txt"), data, 0644)
require.NoError(t, err)
time.Sleep(10 * time.Second)
item, err := graph.GetItemPath("/onedriver_tests/uploadSessionSmallFS.txt", auth)
if err != nil || item == nil {
t.Fatal(err)
}
content, _, err := graph.GetItemContent(item.ID, auth)
require.NoError(t, err)
if !bytes.Equal(content, data) {
t.Fatalf("Data mismatch. Original content: %s\nRemote content: %s\n", data, content)
}
// upload it again to ensure uploads with an existing remote id succeed
data = []byte("more super special data")
err = ioutil.WriteFile(filepath.Join(TestDir, "uploadSessionSmallFS.txt"), data, 0644)
require.NoError(t, err)
time.Sleep(15 * time.Second)
item2, err := graph.GetItemPath("/onedriver_tests/uploadSessionSmallFS.txt", auth)
if err != nil || item2 == nil {
t.Fatal(err)
}
content, _, err = graph.GetItemContent(item2.ID, auth)
require.NoError(t, err)
if !bytes.Equal(content, data) {
t.Fatalf("Data mismatch. Original content: %s\nRemote content: %s\n", data, content)
}
}
// copy large file inside onedrive mount, then verify that we can still
// access selected lines
func TestUploadSessionLargeFS(t *testing.T) {
t.Parallel()
fname := filepath.Join(TestDir, "dmel.fa")
require.NoError(t, exec.Command("cp", "dmel.fa", fname).Run())
contents, err := ioutil.ReadFile(fname)
require.NoError(t, err)
header := ">X dna:chromosome chromosome:BDGP6.22:X:1:23542271:1 REF"
if string(contents[:len(header)]) != header {
t.Fatalf("Could not read FASTA header. Wanted \"%s\", got \"%s\"\n",
header, string(contents[:len(header)]))
}
final := "AAATAAAATAC\n" // makes yucky test output, but is the final line
match := string(contents[len(contents)-len(final):])
if match != final {
t.Fatalf("Could not read final line of FASTA. Wanted \"%s\", got \"%s\"\n",
final, match)
}
st, _ := os.Stat(fname)
if st.Size() == 0 {
t.Fatal("File size cannot be 0.")
}
// poll endpoint to make sure it has a size greater than 0
size := uint64(len(contents))
var item *graph.DriveItem
assert.Eventually(t, func() bool {
item, _ = graph.GetItemPath("/onedriver_tests/dmel.fa", auth)
inode := NewInodeDriveItem(item)
return item != nil && inode.Size() == size
}, 120*time.Second, time.Second, "Upload session did not complete successfully!")
// test multipart downloads as a bonus part of the test
downloaded, _, err := graph.GetItemContent(item.ID, auth)
assert.NoError(t, err)
assert.Equal(t, graph.QuickXORHash(&contents), graph.QuickXORHash(&downloaded),
"Downloaded content did not match original content.")
}
onedriver-0.14.1/go.mod 0000664 0000000 0000000 00000000576 14513675524 0014724 0 ustar 00root root 0000000 0000000 module github.com/jstaf/onedriver
require (
github.com/coreos/go-systemd/v22 v22.3.2
github.com/godbus/dbus/v5 v5.0.6
github.com/gotk3/gotk3 v0.6.1
github.com/hanwen/go-fuse/v2 v2.1.0
github.com/imdario/mergo v0.3.13
github.com/rs/zerolog v1.26.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.7.0
go.etcd.io/bbolt v1.3.6
gopkg.in/yaml.v3 v3.0.1
)
go 1.13
onedriver-0.14.1/go.sum 0000664 0000000 0000000 00000014102 14513675524 0014737 0 ustar 00root root 0000000 0000000 github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gotk3/gotk3 v0.6.1 h1:GJ400a0ecEEWrzjBvzBzH+pB/esEMIGdB9zPSmBdoeo=
github.com/gotk3/gotk3 v0.6.1/go.mod h1:/hqFpkNa9T3JgNAE2fLvCdov7c5bw//FHNZrZ3Uv9/Q=
github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
github.com/hanwen/go-fuse/v2 v2.1.0 h1:+32ffteETaLYClUj0a3aHjZ1hOPxxaNEHiZiujuDaek=
github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e h1:WUoyKPm6nCo1BnNUvPGnFG3T5DUVem42yDJZZ4CNxMA=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
onedriver-0.14.1/onedriver.spec 0000664 0000000 0000000 00000023513 14513675524 0016463 0 ustar 00root root 0000000 0000000 Name: onedriver
Version: 0.14.1
Release: 1%{?dist}
Summary: A native Linux filesystem for Microsoft Onedrive
License: GPL-3.0-or-later
URL: https://github.com/jstaf/onedriver
Source0: https://github.com/jstaf/onedriver/archive/refs/tags/v%{version}.tar.gz
%if 0%{?suse_version}
BuildRequires: go >= 1.17
%else
BuildRequires: golang >= 1.17.0
%endif
BuildRequires: git
BuildRequires: gcc
BuildRequires: pkg-config
BuildRequires: webkit2gtk3-devel
Requires: fuse
%description
Onedriver is a native Linux filesystem for Microsoft Onedrive. Files and
metadata are downloaded on-demand instead of syncing the entire drive to
your local computer.
%prep
%autosetup
%build
bash cgo-helper.sh
if rpm -q pango | grep -q 1.42; then
BUILD_TAGS=-tags=pango_1_42,gtk_3_22
fi
go build -v -mod=vendor $BUILD_TAGS \
-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(cat .commit)" \
./cmd/onedriver
go build -v -mod=vendor $BUILD_TAGS \
-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(cat .commit)" \
./cmd/onedriver-launcher
gzip pkg/resources/onedriver.1
%install
rm -rf $RPM_BUILD_ROOT
mkdir -p %{buildroot}/%{_bindir}
mkdir -p %{buildroot}/usr/share/icons/%{name}
mkdir -p %{buildroot}/usr/share/applications
mkdir -p %{buildroot}/usr/lib/systemd/user
mkdir -p %{buildroot}/usr/share/man/man1
cp %{name} %{buildroot}/%{_bindir}
cp %{name}-launcher %{buildroot}/%{_bindir}
cp pkg/resources/%{name}.png %{buildroot}/usr/share/icons/%{name}
cp pkg/resources/%{name}-128.png %{buildroot}/usr/share/icons/%{name}
cp pkg/resources/%{name}.svg %{buildroot}/usr/share/icons/%{name}
cp pkg/resources/%{name}.desktop %{buildroot}/usr/share/applications
cp pkg/resources/%{name}@.service %{buildroot}/usr/lib/systemd/user
cp pkg/resources/%{name}.1.gz %{buildroot}/usr/share/man/man1
# fix for el8 build in mock
%define _empty_manifest_terminate_build 0
%files
%defattr(-,root,root,-)
%attr(755, root, root) %{_bindir}/%{name}
%attr(755, root, root) %{_bindir}/%{name}-launcher
%dir /usr/share/icons/%{name}
%attr(644, root, root) /usr/share/icons/%{name}/%{name}.png
%attr(644, root, root) /usr/share/icons/%{name}/%{name}-128.png
%attr(644, root, root) /usr/share/icons/%{name}/%{name}.svg
%attr(644, root, root) /usr/share/applications/%{name}.desktop
%attr(644, root, root) /usr/lib/systemd/user/%{name}@.service
%doc
%attr(644, root, root) /usr/share/man/man1/%{name}.1.gz
%changelog
* Wed Oct 18 2023 Jeff Stafford - 0.14.1
- Fixes a bug with file corruption in some scenarios from version 0.14.0.
* Fri Jul 14 2023 Jeff Stafford - 0.14.0
- We now use quickxorhash checksums for both personal and business accounts.
- The cache for file contents has been moved out of boltdb and onto the local filesystem.
This makes accessing, reading, and writing files faster than before.
- onedriver no longer allows you to create filenames that are not allowed by OneDrive.
* Tue Nov 1 2022 Jeff Stafford - 0.13.0
- The GUI has been rewritten in golang for ease of maintenance and code sharing with
the rest of the onedriver application.
- onedriver can now be configured with a config file at "~/.config/onedriver/config.yml".
- There is now a configuration menu in the GUI. You can now set a couple configuration
options that were previously only possible with "systemctl edit".
- The onedriver CLI now stores its cache in the same path that the GUI expects,
meaning that invoking the onedriver filesystem directly and via the GUI will share the
cache as long as the mountpoint is the same.
- onedriver now prefers multipart downloads for files >10MB instead of a single massive
GET request. This should significantly improve reliability when working with large files.
* Tue Nov 2 2021 Jeff Stafford - 0.12.0
- Major internal rewrite - onedriver now talks directly to the kernel instead of using
go-fuse/fs as an intermediary. This makes metadata operations a bit faster.
- onedriver better handles completion of multipart uploads and does not repeatedly
upload files on success. This significantly improves upload speed.
- Fixes a crash when writes begin at an offset beyond maximum file length. This fixes a
bug where running ld inside the filesystem would cause it to crash.
- Switch to using zerolog instead of logrus for logging. Though zerolog is supposedly
faster, the real reason to switch is that it's much easier for me (and hopefully you)
to read! Also, pretty colors!
- onedriver now gives you the option to choose to authenticate via the terminal when
authenticating via the new --no-browser option (this is the functionality from the
old "headless" build).
- Add a workaround for the TLS cert authentication issue from
https://bugzilla.redhat.com/show_bug.cgi?id=2024296
* Tue Aug 17 2021 Jeff Stafford - 0.11.2
- onedriver now disallows rmdir on nonempty directories.
- The filesystem now detects if it is offline more reliably.
* Sun Jul 11 2021 Jeff Stafford - 0.11.1
- Fix startup crash in onedriver-launcher when onedriver has not been launched before.
* Sat Jul 3 2021 Jeff Stafford - 0.11.0
- Now includes a snazzy GUI for managing your mountpoints. No terminal skills are required
to use onedriver now.
- The upload logic has been rewritten to no longer use 0-byte files as placeholders in
any scenario. This fixes a race condition where software like LibreOffice, KeepassXC, or
Krita could generate a 0-byte file instead of the intended file when the file was 4MB or
larger.
- onedriver now uses etags AND modification times when syncing server-side changes back to
the client. This reduces the number of times that files must be redownloaded because of
bad timestamp data from the Microsoft API.
* Mon May 17 2021 Jeff Stafford - 0.10.1
- Fix the onedriver .desktop launcher so it uses the new systemd unit name.
* Mon May 17 2021 Jeff Stafford - 0.10.0
- Add AUR installation method for Arch-based distros - thanks fmoledina!
- Add manpage for onedriver - thanks GenericGuy!
- The onedriver systemd service now restarts itself in the event of a crash -
thanks dipunm!
- Fix a rare crash while syncing server-side changes missing checksums.
- Fix a race-condition that caused uploaded files to occasionally be replaced by a 0-byte
copy (most commonly caused by the way LibreOffice saves files).
- Cap number of uploads that can be in-progress at any time to 5. This makes uploading
directories with lots of files appear to go a bit faster.
- The account name is now displayed in the title bar if you need to reauthenticate to
OneDrive (makes it easier to know which credentials to use when prompted).
* Tue Sep 29 2020 Jeff Stafford - 0.9.2
- Adds fix for server-side update to Microsoft's authentication APIs.
- Fix a crash on auth renewal after computer suspend or other network interruption.
* Sat Jun 6 2020 Jeff Stafford - 0.9.1
- Filenames are now sanitized when uploading new files.
- onedriver now only syncs metadata changes for a file from server to client if its
contents have changed as well. This means that programs like LibreOffice will no longer
complain about their lockfiles being updated while saving.
* Wed Jun 3 2020 Jeff Stafford - 0.9.0
- Multiple OneDrive drives can now be mounted simultaneously via systemd.
- Failed uploads are now retried automatically.
- In-progress uploads are now cached on disk and resumed the next time onedriver starts
if the upload is terminated prematurely (for instance, if a user shuts down their computer)
- All uploads are now verified against checksums of their local content.
* Thu Apr 2 2020 Jeff Stafford - 0.8.0
- Add a desktop launcher for single drive scenarios (better multi-drive support coming soon!).
- Fix for directories containing more than 200 items.
- Miscellaneous fixes and tests for OneDrive for Business
- Compatibility with Go 1.14
* Mon Feb 17 2020 Jeff Stafford - 0.7.2
- Allow use of disk cache after filesystem transitions from offline to online.
* Mon Feb 17 2020 Jeff Stafford - 0.7.1
- Fix for filesystem coming up blank after user systemd session start.
* Wed Feb 12 2020 Jeff Stafford - 0.7.0
- Now has drive username in Nautilus sidebar and small OneDrive logo on mountpoint.
- No longer requires manually closing the authentication window.
- Add systemd user service for automount on boot.
- Now transitions gracefully from online to offline (or vice-versa) depending on network availability.
* Thu Jan 16 2020 Jeff Stafford - 0.6
- Filesystem metadata is now serialized to disk at regular intervals.
- Using on-disk metadata, onedriver can now be used in read-only mode while offline.
- onedriver now stores its on-disk cache and auth tokens under the normal user cache directory.
* Mon Nov 4 2019 Jeff Stafford - 0.5
- Add a dedicated thread responsible for syncing remote changes to local cache every 30s.
- Add a dedicated thread to monitor, deduplicate, and retry uploads.
- Now all HTTP requests will retry server-side 5xx errors a single time by default.
- Print HTTP status code with Graph API errors where they occur.
- Purge file contents from memory on flush() and store them on disk.
- onedriver now validates on-disk file contents using checksums before using them.
* Sun Sep 15 2019 Jeff Stafford - 0.4
- Port to go-fuse version 2 and the new nodefs API for improved performance.
* Sat Sep 7 2019 Jeff Stafford - 0.3
- Initial .spec file
onedriver-0.14.1/pkg/ 0000775 0000000 0000000 00000000000 14513675524 0014367 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/pkg/debian/ 0000775 0000000 0000000 00000000000 14513675524 0015611 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/pkg/debian/changelog 0000664 0000000 0000000 00000015637 14513675524 0017477 0 ustar 00root root 0000000 0000000 onedriver (0.14.1-1) jammy; urgency=low
* Fixes a bug with file corruption in some scenarios from version 0.14.0.
-- Jeff Stafford Wed, 18 Oct 2023 02:00:00 -0400
onedriver (0.14.0-2) jammy; urgency=low
* This is a dummy release to forcibly refresh the GPG key in the Debian OBS repositories.
No actual changes.
-- Jeff Stafford Mon, 16 Oct 2023 17:00:00 -0400
onedriver (0.14.0-1) jammy; urgency=low
* We now use quickxorhash checksums for both personal and business accounts.
* The cache for file contents has been moved out of boltdb and onto the local filesystem.
This makes accessing, reading, and writing files faster than before.
* onedriver no longer allows you to create filenames that are not allowed by OneDrive.
-- Jeff Stafford Fri, 14 Jul 2023 01:00:00 -0400
onedriver (0.13.0-1) focal; urgency=low
* The GUI has been rewritten in golang for ease of maintenance and code sharing with
the rest of the onedriver application.
* onedriver can now be configured with a config file at "~/.config/onedriver/config.yml".
* There is now a configuration menu in the GUI. You can now set a couple configuration
options that were previously only possible with "systemctl edit".
* The onedriver CLI now stores its cache in the same path that the GUI expects,
meaning that invoking the onedriver filesystem directly and via the GUI will share the
cache as long as the mountpoint is the same.
* onedriver now prefers multipart downloads for files >10MB instead of a single massive
GET request. This should significantly improve reliability when working with large files.
-- Jeff Stafford Tue, 1 Nov 2022 22:00:00 -0400
onedriver (0.12.0-1) focal; urgency=low
* Major internal rewrite - onedriver now talks directly to the kernel instead of using
go-fuse/fs as an intermediary. This makes metadata operations a bit faster.
* onedriver better handles completion of multipart uploads and does not repeatedly
upload files on success. This significantly improves upload speed.
* Fixes a crash when writes begin at an offset beyond maximum file length. This fixes a
bug where running ld inside the filesystem would cause it to crash.
* Switch to using zerolog instead of logrus for logging. Though zerolog is supposedly
faster, the real reason to switch is that it's much easier for me (and hopefully you)
to read. Also, pretty colors!
* onedriver now gives you the option to choose to authenticate via the terminal when
authenticating via the new --no-browser option (this is the functionality from the
old "headless" build).
* Add a workaround for the TLS cert authentication issue from
https://bugzilla.redhat.com/show_bug.cgi?id=2024296
-- Jeff Stafford Tue, 2 Nov 2021 19:00:00 -0400
onedriver (0.11.2-1) focal; urgency=low
* onedriver now disallows rmdir on nonempty directories.
* The filesystem now detects if it is offline more reliably.
-- Jeff Stafford Tue, 17 Aug 2021 00:15:00 -0400
onedriver (0.11.1-1) focal; urgency=low
* Fix startup crash in onedriver-launcher when onedriver has not been launched before.
-- Jeff Stafford Sun, 11 Jul 2021 00:30:00 -0400
onedriver (0.11.0-1) focal; urgency=low
* Now includes a snazzy GUI for managing your mountpoints. No terminal skills are required
to use onedriver now.
* The upload logic has been rewritten to no longer use 0-byte files as placeholders in
any scenario. This fixes a race condition where software like LibreOffice, KeepassXC, or
Krita could generate a 0-byte file instead of the intended file when the file was 4MB or
larger.
* onedriver now uses etags AND modification times when syncing server-side changes back to
the client. This reduces the number of times that files must be redownloaded because of
bad timestamp data from the Microsoft API.
-- Jeff Stafford Sat, 3 Jul 2021 13:30:00 -0400
onedriver (0.10.1-1) focal; urgency=low
* Fix the onedriver .desktop launcher so it uses the new systemd unit name.
-- Jeff Stafford Mon, 17 May 2021 02:30:00 -0400
onedriver (0.10.0-1) focal; urgency=low
* Adds AUR installation method for Arch-based distros - thanks fmoledina!
* Add manpage for onedriver - thanks GenericGuy!
* The onedriver systemd service now restarts itself in the event of a crash -
thanks dipunm!
* Fix a rare crash while syncing server-side changes missing checksums.
* Fix a race-condition that caused uploaded files to occasionally be replaced by a 0-byte
copy (most commonly caused by the way LibreOffice saves files).
* Cap number of uploads that can be in-progress at any time to 5. This makes uploading
directories with lots of files appear to go a bit faster.
* The account name is now displayed in the title bar if you need to reauthenticate to
OneDrive (makes it easier to know which credentials to use when prompted).
-- Jeff Stafford Mon, 17 May 2021 01:45:00 -0400
onedriver (0.9.2-1) focal; urgency=low
* Adds fix for server-side update to Microsoft's authentication APIs.
* Fix a crash on auth renewal after computer suspend or other network interruption.
-- Jeff Stafford Tue, 29 Sep 2020 20:50:00 -0400
onedriver (0.9.1-1) focal; urgency=low
* Filenames are now sanitized when uploading new files.
* onedriver now only syncs metadata changes for a file from server to client if its
contents have changed as well. This means that programs like LibreOffice will no longer
complain about their lockfiles being updated while saving.
-- Jeff Stafford Sat, 6 Jun 2020 12:18:00 -0400
onedriver (0.9.0-1) unstable; urgency=low
* Multiple OneDrive drives can now be mounted simultaneously via systemd.
* Failed uploads are now retried automatically.
* In-progress uploads are now cached on disk and resumed the next time onedriver starts
if the upload is terminated prematurely (for instance, if a user shuts down their
computer).
* All uploads are now verified against checksums of their local content (an illustrative
sketch follows this entry).
-- Jeff Stafford Wed, 3 Jun 2020 18:29:00 -0400
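Verifying an upload against a checksum of the local content reduces to comparing hashes.
OneDrive for Business reports Microsoft's quickXorHash; sha256 stands in below purely to
keep the sketch self-contained:

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// uploadVerified reports whether the hash the server returned for an uploaded
// item matches a hash of the bytes we actually sent.
func uploadVerified(local, serverHash []byte) bool {
	sum := sha256.Sum256(local)
	return bytes.Equal(sum[:], serverHash)
}

func main() {
	content := []byte("file contents")
	sum := sha256.Sum256(content)                // pretend this came back from the API
	fmt.Println(uploadVerified(content, sum[:])) // true
}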
onedriver (0.8.0-1) unstable; urgency=low
* Add a desktop launcher for single drive scenarios (better multi-drive support coming soon!).
* Fix for directories containing more than 200 items.
* Miscellaneous fixes and tests for OneDrive for Business.
* Compatibility with Go 1.14.
-- Jeff Stafford Thu, 2 Apr 2020 17:18:00 -0500
onedriver (0.7.2-1) unstable; urgency=low
* Initial debian build.
-- Jeff Stafford Mon, 17 Feb 2020 22:24:56 -0500
onedriver-0.14.1/pkg/debian/compat 0000664 0000000 0000000 00000000003 14513675524 0017010 0 ustar 00root root 0000000 0000000 10
onedriver-0.14.1/pkg/debian/control 0000664 0000000 0000000 00000001270 14513675524 0017214 0 ustar 00root root 0000000 0000000 Source: onedriver
Section: utils
Priority: optional
Maintainer: Jeff Stafford
Build-Depends:
golang (>= 1.15), gcc, pkg-config, libwebkit2gtk-4.0-dev, git, debhelper
Standards-Version: 4.4.1
Homepage: https://github.com/jstaf/onedriver
#Vcs-Browser: https://github.com/jstaf/onedriver
#Vcs-Git: https://github.com/jstaf/onedriver.git
Package: onedriver
Architecture: any
Depends: libwebkit2gtk-4.0-37, fuse
Suggests: systemd
Description: A native Linux filesystem for Microsoft OneDrive
 Onedriver is a native Linux filesystem for Microsoft OneDrive. Files and metadata are
 downloaded on-demand instead of requiring you to sync your entire account to disk.
onedriver-0.14.1/pkg/debian/copyright 0000664 0000000 0000000 00000001631 14513675524 0017545 0 ustar 00root root 0000000 0000000 Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: onedriver
Upstream-Contact: Jeff Stafford
Source: https://github.com/jstaf/onedriver
Files: *
Copyright: 2022 Jeff Stafford
License: GPL-3+
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
onedriver-0.14.1/pkg/debian/rules 0000775 0000000 0000000 00000003105 14513675524 0016670 0 ustar 00root root 0000000 0000000 #!/usr/bin/make -f
%:
dh $@
override_dh_auto_test:
@echo "skipping tests..."
override_dh_auto_clean:
rm -f *.db *.rpm *.deb *.dsc *.log *.fa *.xz *.gz *.test onedriver onedriver-headless unshare .auth_tokens.json filelist.txt
rm -rf util-linux-*/ onedriver-*/
override_dh_auto_build:
bash cgo-helper.sh
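# -ldflags "-X ..." below stamps the current git commit (read from .commit) into the binary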
# GOCACHE will be for a nonexistent user in pbuilder otherwise
GOCACHE=/tmp/go-cache go build -v -mod=vendor \
-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(shell cat .commit)" \
./cmd/onedriver
GOCACHE=/tmp/go-cache go build -v -mod=vendor \
-ldflags="-X github.com/jstaf/onedriver/cmd/common.commit=$(shell cat .commit)" \
./cmd/onedriver-launcher
gzip pkg/resources/onedriver.1
override_dh_auto_install:
install -D -m 0755 onedriver $$(pwd)/debian/onedriver/usr/bin/onedriver
install -D -m 0755 onedriver-launcher $$(pwd)/debian/onedriver/usr/bin/onedriver-launcher
install -D -m 0644 pkg/resources/onedriver.png $$(pwd)/debian/onedriver/usr/share/icons/onedriver/onedriver.png
install -D -m 0644 pkg/resources/onedriver-128.png $$(pwd)/debian/onedriver/usr/share/icons/onedriver/onedriver-128.png
install -D -m 0644 pkg/resources/onedriver.svg $$(pwd)/debian/onedriver/usr/share/icons/onedriver/onedriver.svg
install -D -m 0644 pkg/resources/onedriver.desktop $$(pwd)/debian/onedriver/usr/share/applications/onedriver.desktop
install -D -m 0644 pkg/resources/onedriver@.service $$(pwd)/debian/onedriver/usr/lib/systemd/user/onedriver@.service
install -D -m 0644 pkg/resources/onedriver.1.gz $$(pwd)/debian/onedriver/usr/share/man/man1/onedriver.1.gz
onedriver-0.14.1/pkg/debian/source/ 0000775 0000000 0000000 00000000000 14513675524 0017111 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/pkg/debian/source/format 0000664 0000000 0000000 00000000014 14513675524 0020317 0 ustar 00root root 0000000 0000000 3.0 (quilt)
onedriver-0.14.1/pkg/resources/ 0000775 0000000 0000000 00000000000 14513675524 0016401 5 ustar 00root root 0000000 0000000 onedriver-0.14.1/pkg/resources/config-example.yml 0000664 0000000 0000000 00000002320 14513675524 0022017 0 ustar 00root root 0000000 0000000 # What log level should onedriver use (debug, info, and "warn" are recommended).
# - trace - Log everything, including every syscall handled by the filesystem.
# - debug - Log all operations that modify a file or directory.
# - info - Log "big" operations like uploads and downloads.
# - warn - These are warnings. Usually not a problem.
# - error - Things that onedriver doesn't like, but can continue running
# (can possibly result in file corruption or inability to do something).
# - fatal - Only log errors that kill the program (this log level is not recommended).
log: debug
# cacheDir specifies which directory onedriver should store its data in.
# This directory can get pretty large. "~" is a placeholder for your home directory.
cacheDir: ~/.cache/onedriver
# Don't uncomment or change this unless you are a super duper expert and have
# registered your own version of onedriver in Azure Active Directory. These are the
# default values.
#auth:
# clientID: "3470c3fa-bc10-45ab-a0a9-2d30836485d1"
# codeURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize"
# tokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token"
# redirectURL: "https://login.live.com/oauth20_desktop.srf"
onedriver-0.14.1/pkg/resources/onedriver-128.png 0000664 0000000 0000000 00000031535 14513675524 0021423 0 ustar 00root root 0000000 0000000 PNG
[binary PNG image data omitted]