first commit

tag v0.1.6 · Jonathan Cobb, 5 years ago · commit 4d3a9ace69

100 changed files with 6066 additions and 0 deletions
  1. +16 -0 .gitignore
  2. +18 -0 .gitmodules
  3. +695 -0 LICENSE.md
  4. +44 -0 README.md
  5. +2 -0 automation/.gitignore
  6. +56 -0 automation/roles/algo/files/algo_refresh_users.sh
  7. +47 -0 automation/roles/algo/files/algo_refresh_users_monitor.sh
  8. +13 -0 automation/roles/algo/files/bubble_role.json
  9. +187 -0 automation/roles/algo/files/config.cfg.hbs
  10. BIN
  11. +5 -0 automation/roles/algo/files/supervisor_algo_refresh_users_monitor.conf
  12. +89 -0 automation/roles/algo/tasks/algo_firewall.yml
  13. +46 -0 automation/roles/algo/tasks/main.yml
  14. +40 -0 automation/roles/algo/templates/install_algo.sh.j2
  15. +2 -0 automation/roles/bubble/files/bsql.sh
  16. +140 -0 automation/roles/bubble/files/bubble_restore_monitor.sh
  17. +30 -0 automation/roles/bubble/files/bubble_role.json
  18. +140 -0 automation/roles/bubble/files/init_bubble_db.sh
  19. +61 -0 automation/roles/bubble/files/init_roles.sh
  20. +99 -0 automation/roles/bubble/files/pg_hba.conf
  21. +614 -0 automation/roles/bubble/files/postgresql.conf
  22. +9 -0 automation/roles/bubble/files/random_password.sh
  23. +6 -0 automation/roles/bubble/handlers/main.yml
  24. +135 -0 automation/roles/bubble/tasks/main.yml
  25. +16 -0 automation/roles/bubble/tasks/postgresql.yml
  26. +30 -0 automation/roles/bubble/tasks/postgresql_data.yml
  27. +15 -0 automation/roles/bubble/tasks/restore.yml
  28. +9 -0 automation/roles/bubble/templates/bubble.env.j2
  29. +19 -0 automation/roles/bubble/templates/full_reset_db.sh.j2
  30. +34 -0 automation/roles/bubble/templates/snapshot_ansible.sh.j2
  31. +11 -0 automation/roles/bubble_finalizer/files/bubble_role.json
  32. +30 -0 automation/roles/bubble_finalizer/files/copy_certs_to_bubble.sh
  33. +9 -0 automation/roles/bubble_finalizer/files/supervisor_bubble.conf
  34. +40 -0 automation/roles/bubble_finalizer/tasks/main.yml
  35. +9 -0 automation/roles/common/files/bubble_role.json
  36. +24 -0 automation/roles/common/files/dot-screenrc
  37. +36 -0 automation/roles/common/tasks/main.yml
  38. +5 -0 automation/roles/firewall/defaults/main.yml
  39. +180 -0 automation/roles/firewall/files/bubble_peer_manager.py
  40. +12 -0 automation/roles/firewall/files/bubble_role.json
  41. +5 -0 automation/roles/firewall/files/supervisor_bubble_peer_manager.conf
  42. +118 -0 automation/roles/firewall/tasks/main.yml
  43. +104 -0 automation/roles/firewall/tasks/port_redirect.yml
  44. +95 -0 automation/roles/firewall/tasks/sage.yml
  45. +23 -0 automation/roles/mitmproxy/files/bubble_api.py
  46. +48 -0 automation/roles/mitmproxy/files/bubble_modify.py
  47. +12 -0 automation/roles/mitmproxy/files/bubble_role.json
  48. +90 -0 automation/roles/mitmproxy/files/dns_spoofing.py
  49. +32 -0 automation/roles/mitmproxy/files/install_cert.sh
  50. BIN
  51. +28 -0 automation/roles/mitmproxy/files/reuse_bubble_mitm_certs.sh
  52. +19 -0 automation/roles/mitmproxy/files/run_mitmdump.sh
  53. +88 -0 automation/roles/mitmproxy/tasks/main.yml
  54. +55 -0 automation/roles/mitmproxy/tasks/route.yml
  55. +3 -0 automation/roles/mitmproxy/templates/bubble_config.py.j2
  56. +5 -0 automation/roles/mitmproxy/templates/supervisor_mitmproxy.conf.j2
  57. +34 -0 automation/roles/nginx/defaults/main.yml
  58. +12 -0 automation/roles/nginx/files/bubble_role.json
  59. +13 -0 automation/roles/nginx/files/init_certbot.sh
  60. +4 -0 automation/roles/nginx/handlers/main.yml
  61. +68 -0 automation/roles/nginx/tasks/main.yml
  62. +15 -0 automation/roles/nginx/tasks/site.yml
  63. +13 -0 automation/roles/nginx/templates/site.conf.j2
  64. +1 -0 automation/roles/nginx/templates/stronger_dhparams.conf
  65. +84 -0 bin/activate
  66. +16 -0 bin/bdecrypt
  67. +27 -0 bin/bdelete
  68. +16 -0 bin/bencrypt
  69. +27 -0 bin/bget
  70. +38 -0 bin/bmodel
  71. +38 -0 bin/bpatch
  72. +55 -0 bin/bpatchfull
  73. +45 -0 bin/bpost
  74. +28 -0 bin/bposte
  75. +45 -0 bin/bput
  76. +28 -0 bin/bpute
  77. +49 -0 bin/bscript
  78. +98 -0 bin/bubble
  79. +105 -0 bin/bubble_common
  80. +30 -0 bin/bunlock
  81. +53 -0 bin/cleanup_bubble_databases
  82. +55 -0 bin/create_user_and_network
  83. +49 -0 bin/first_time_setup.sh
  84. +29 -0 bin/first_time_ubuntu.sh
  85. +59 -0 bin/git_update_bubble.sh
  86. +18 -0 bin/list_bubble_databases
  87. +39 -0 bin/new_bubble.sh
  88. +104 -0 bin/prep_bubble_jar
  89. +36 -0 bin/proxy
  90. +47 -0 bin/reset_bubble_db
  91. +115 -0 bin/run.sh
  92. +63 -0 bin/update_role
  93. +283 -0 bubble-server/pom.xml
  94. +217 -0 bubble-server/src/main/java/bubble/ApiConstants.java
  95. +22 -0 bubble-server/src/main/java/bubble/BubbleHandlebars.java
  96. +61 -0 bubble-server/src/main/java/bubble/auth/BubbleAuthFilter.java
  97. +16 -0 bubble-server/src/main/java/bubble/auth/BubbleAuthProvider.java
  98. +29 -0 bubble-server/src/main/java/bubble/client/BubbleApiClient.java
  99. +146 -0 bubble-server/src/main/java/bubble/client/BubbleNodeClient.java
  100. +38 -0 bubble-server/src/main/java/bubble/client/BubbleNodeDownloadClient.java

.gitignore (+16 -0)

@@ -0,0 +1,16 @@
*.iml
.idea
tmp
logs
dependency-reduced-pom.xml
*.log
*~
target/surefire
target/surefire-reports
target/classes
target/test-classes
target/maven-archiver
target
.DS_Store

.BUBBLE*

.gitmodules (+18 -0)

@@ -0,0 +1,18 @@
[submodule "utils/cobbzilla-parent"]
path = utils/cobbzilla-parent
url = git@git.bubblev.org:bubblev/cobbzilla-parent.git
[submodule "utils/cobbzilla-utils"]
path = utils/cobbzilla-utils
url = git@git.bubblev.org:bubblev/cobbzilla-utils.git
[submodule "utils/cobbzilla-wizard"]
path = utils/cobbzilla-wizard
url = git@git.bubblev.org:bubblev/cobbzilla-wizard.git
[submodule "utils/restex"]
path = utils/restex
url = git@git.bubblev.org:bubblev/restex.git
[submodule "utils/templated-mail-sender"]
path = utils/templated-mail-sender
url = git@git.bubblev.org:bubblev/templated-mail-sender.git
[submodule "bubble-web"]
path = bubble-web
url = git@git.bubblev.org:bubblev/bubble-web.git

LICENSE.md (+695 -0)

@@ -0,0 +1,695 @@
# Copyright (C) 2019 Bubble, Inc.

## For ANY commercial use of this software
All rights are reserved. Please contact licensing@bubblev.com for more information.

## For individuals and not-for-profit organizations
If you are an individual person or a not-for-profit organization, and your usage of this software is entirely
non-commercial, you may use this software under the terms of the GNU Affero General Public License, version 3,
summarized below and reprinted in full thereafter.

```text
Bubble - a privacy-first VPN
Copyright (C) 2019 Bubble, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
```

----
Full text of the GNU Affero General Public License, version 3:

```text
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
```

README.md (+44 -0)

@@ -0,0 +1,44 @@
bubble
======

# Development Setup

## First-Time System Setup
After you clone this repository, run:

    ./bin/first_time_ubuntu.sh

If you are running on a non-Ubuntu system, copy that file to something like:

    ./bin/first_time_myoperatingsystem.sh

Then edit it so that the same packages get installed, and submit a pull request so we can add support for your operating system to the main repository.

You only need to run this command once, ever, on a development system. It ensures that the appropriate packages are installed and proper databases and database users exist.

## First-Time Dev Setup
After running the system setup above, run:

    ./bin/first_time_setup.sh

This will grab all the submodules and perform an initial build of all components.

## Bubble environment file
You will need a file named `${HOME}/.bubble.env` which contains various environment variables required to run the server.

Talk to another developer to get a copy of this file. Do not ever send this file over email or any other unencrypted channel.
Always use `scp` to copy this file from one machine to another.
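
For example (the source host and path here are illustrative):

    scp dev-laptop:/home/dev/.bubble.env ${HOME}/.bubble.env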

After you have the env file in place, create a symlink called `${HOME}/.bubble-test.env`:

    cd ${HOME} && ln -s .bubble.env .bubble-test.env

## Subsequent Updates
If you want to grab the latest code, and ensure that all git submodules are properly in sync with the main repository, run:

    ./bin/git_update_bubble.sh

This will update and rebuild all submodules, and the main bubble jar file.

## Running in development
Assuming you ran the commands above, you can run a test server using the method described in the bubble-web [README](https://git.bubblev.org/bubbleV/bubble-web/src/branch/master/README.md).

automation/.gitignore (+2 -0)

@@ -0,0 +1,2 @@
config
.venv

automation/roles/algo/files/algo_refresh_users.sh (+56 -0)

@@ -0,0 +1,56 @@
#!/bin/bash

LOG=/tmp/bubble.algo_refresh_users.log

function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

function log {
  echo "$(date): ${1}" >> ${LOG}
}

ALGO_BASE=/root/ansible/roles/algo/algo-master
if [[ ! -d ${ALGO_BASE} ]] ; then
  die "Algo VPN directory ${ALGO_BASE} not found"
fi

CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
if [[ ! -f "${CA_PASS_FILE}" ]] ; then
  die "No CA password file found: ${CA_PASS_FILE}"
fi
if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then
  die "No ${ALGO_BASE}/config.cfg.hbs found"
fi

log "Regenerating algo config..."
java -cp /home/bubble/current/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg"

log "Updating algo VPN users..."
cd ${ALGO_BASE} && \
  python3 -m virtualenv --python="$(command -v python3)" .env \
  && source .env/bin/activate \
  && python3 -m pip install -U pip virtualenv \
  && python3 -m pip install -r requirements.txt \
  && ansible-playbook users.yml --tags update-users --skip-tags debug \
     -e "ca_password=$(cat ${CA_PASS_FILE})
provider=local
server=localhost
store_cakey=true
ondemand_cellular=false
ondemand_wifi=false
store_pki=true
dns_adblocking=false
ssh_tunneling=false
endpoint={{ endpoint }}
server_name={{ server_name }}" 2>&1 | tee -a ${LOG} || die "Error running algo users.yml"

# Archive configs in a place that the BackupService can pick them up
log "Sync'ing algo VPN users to bubble..."
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs"
cd /home/bubble && rm -rf configs/* && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home"

log "VPN users successfully sync'd to bubble"

automation/roles/algo/files/algo_refresh_users_monitor.sh (+47 -0)

@@ -0,0 +1,47 @@
#!/bin/bash

LOG=/tmp/bubble.algo_refresh_users_monitor.log

function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

function log {
  echo "$(date): ${1}" >> ${LOG}
}

ALGO_BASE=/root/ansible/roles/algo/algo-master
if [[ ! -d ${ALGO_BASE} ]] ; then
  die "Algo VPN directory ${ALGO_BASE} not found"
fi

CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"
if [[ ! -f "${CA_PASS_FILE}" ]] ; then
  die "No CA password file found: ${CA_PASS_FILE}"
fi
if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then
  die "No ${ALGO_BASE}/config.cfg.hbs found"
fi

BUBBLE_USER_MARKER=/home/bubble/.algo_refresh_users
ALGO_USER_MARKER=${ALGO_BASE}/.algo_refresh_users

if [[ ! -f ${BUBBLE_USER_MARKER} ]] ; then
  touch ${BUBBLE_USER_MARKER} && chown bubble ${BUBBLE_USER_MARKER}
fi
if [[ ! -f ${ALGO_USER_MARKER} ]] ; then
  touch ${ALGO_USER_MARKER}
fi

log "Watching marker file..."
while : ; do
  if [[ $(stat -c %Y ${BUBBLE_USER_MARKER}) -gt $(stat -c %Y ${ALGO_USER_MARKER}) ]] ; then
    touch ${ALGO_USER_MARKER}
    sleep 5s
    log "Refreshing VPN users..."
    /usr/local/bin/algo_refresh_users.sh && log "VPN users successfully refreshed" || log "Error refreshing Algo VPN users"
  fi
  sleep 10s
done
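
The refresh mechanism above is driven entirely by file mtimes: making the bubble-side marker newer than algo's copy triggers a refresh pass on the next poll. So any process with write access can request a refresh; a minimal sketch (the marker path comes from the script above):

    # Signal the monitor: the next poll sees a newer mtime and refreshes VPN users
    touch /home/bubble/.algo_refresh_users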

automation/roles/algo/files/bubble_role.json (+13 -0)

@@ -0,0 +1,13 @@
{
  "name": "algo-0.0.1",
  "priority": 400,
  "template": true,
  "install": "node",
  "config": [
    {"name": "server_name", "value": "[[node.fqdn]]"},
    {"name": "endpoint", "value": "[[node.ip4]]"},
    {"name": "dns_port", "value": "[[configuration.dnsPort]]"},
    {"name": "ssl_port", "value": "[[configuration.nginxPort]]"}
  ],
  "tgzB64": ""
}
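
The `[[...]]` expressions appear to be placeholders resolved per-node before the role runs. For illustration only, with made-up values for the fqdn, IP, and ports, the rendered config entries might look like:

    {"name": "server_name", "value": "bubble-abc123.example.com"},
    {"name": "endpoint", "value": "203.0.113.10"},
    {"name": "dns_port", "value": "5300"},
    {"name": "ssl_port", "value": "1443"}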

automation/roles/algo/files/config.cfg.hbs (+187 -0)

@@ -0,0 +1,187 @@
---

# This is the list of users to generate.
# Every device must have a unique username.
# You can generate up to 250 users at one time.
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123".
users:
<<#each bubbleUsers>> - "<<this>>"
<</each>>
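# Illustration only (not part of the original template): if bubbleUsers
# renders as ["alice", "bob"], the generated section would be roughly:
#   users:
#     - "alice"
#     - "bob"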

### Advanced users only below this line ###

# Store the PKI in a RAM disk. Enabled only if store_pki (retain the PKI) is set to false.
# Supported on macOS and Linux only (including Windows Subsystem for Linux).
pki_in_tmpfs: true

# If True re-init all existing certificates. Boolean
keys_clean_all: False

# Deploy StrongSwan to enable IPsec support
ipsec_enabled: true

# StrongSwan log level
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration
strongswan_log_level: 2

# rightsourceip for ipsec
# ipv4
strongswan_network: 10.19.48.0/24
# ipv6
strongswan_network_ipv6: 'fd9d:bc11:4020::/48'

# Deploy WireGuard
# WireGuard will listen on 51820/UDP. You might need to change to another port
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some
# mobile data networks.
wireguard_enabled: true
wireguard_port: 51820
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent.
# This option will keep the "connection" open in the eyes of NAT.
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence
wireguard_PersistentKeepalive: 0

# WireGuard network configuration
wireguard_network_ipv4: 10.19.49.0/24
wireguard_network_ipv6: fd9d:bc11:4021::/48

# Reduce the MTU of the VPN tunnel
# Some cloud and internet providers use a smaller MTU (Maximum Transmission
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your
# VPN tunnel some network connections will hang. Algo will attempt to set this
# automatically based on your server, but if connections hang you might need to
# adjust this yourself.
# See: https://github.com/trailofbits/algo/blob/master/docs/troubleshooting.md#various-websites-appear-to-be-offline-through-the-vpn
reduce_mtu: 0

# Algo will use the following lists to block ads. You can add new block lists
# after deployment by modifying the line starting "BLOCKLIST_URLS=" at:
# /usr/local/sbin/adblock.sh
# If you load very large blocklists, you may also have to modify resource limits:
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf
adblock_lists:
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts"
- "https://hosts-file.net/ad_servers.txt"

# Enable DNS encryption.
# If 'false', 'dns_servers' should be specified below.
# DNS encryption can not be disabled if DNS adblocking is enabled
dns_encryption: true

# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple
# providers may be specified, but avoid mixing providers that filter results
# (like Cisco) with those that don't (like Cloudflare) or you could get
# inconsistent results. The list of available public providers can be found
# here:
# https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md
dnscrypt_servers:
  ipv4:
    - cloudflare
    - google
  ipv6:
    - cloudflare-ipv6

# DNS servers which will be used if 'dns_encryption' is 'false'.
# The default is to use Cloudflare.
dns_servers:
  ipv4:
    - 1.1.1.1
    - 1.0.0.1
  ipv6:
    - 2606:4700:4700::1111
    - 2606:4700:4700::1001

# Randomly generated IP address for the local dns resolver
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}"

# Your Algo server will automatically install security updates. Some updates
# require a reboot to take effect but your Algo server will not reboot itself
# automatically unless you change 'enabled' below from 'false' to 'true', in
# which case a reboot will take place if necessary at the time specified (as
# HH:MM) in the time zone of your Algo server. The default time zone is UTC.
unattended_reboot:
  enabled: false
  time: 06:00

# Block traffic between connected clients
BetweenClients_DROP: true

# Block SMB/CIFS traffic
block_smb: true

# Block NETBIOS traffic
block_netbios: true

congrats:
  common: |
    "# Congratulations! #"
    "# Your Algo server is running. #"
    "# Config files and certificates are in the ./configs/ directory. #"
    "# Go to https://whoer.net/ after connecting #"
    "# and ensure that all your traffic passes through the VPN. #"
    "# Local DNS resolver {{ local_service_ip }}{{ ', ' + local_service_ipv6 if ipv6_support else '' }} #"
  p12_pass: |
    "# The p12 and SSH keys password for new users is {{ p12_export_password }} #"
  ca_key_pass: |
    "# The CA key password is {{ CA_password|default(omit) }} #"
  ssh_access: |
    "# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #"

SSH_keys:
  comment: algo@ssh
  private: configs/algo.pem
  private_tmp: /tmp/algo-ssh.pem
  public: configs/algo.pem.pub

cloud_providers:
  azure:
    size: Standard_B1S
    image: 19.04
  digitalocean:
    size: s-1vcpu-1gb
    image: "ubuntu-19-04-x64"
  ec2:
    # Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest.
    encrypted: true
    # Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP
    # Additional prompt will be raised to determine which IP to use
    use_existing_eip: false
    size: t2.micro
    image:
      name: "ubuntu-disco-19.04"
      owner: "099720109477"
  gce:
    size: f1-micro
    image: ubuntu-1904
    external_static_ip: false
  lightsail:
    size: nano_1_0
    image: ubuntu_18_04
  scaleway:
    size: DEV1-S
    image: Ubuntu Bionic Beaver
    arch: x86_64
  hetzner:
    server_type: cx11
    image: ubuntu-18.04
  openstack:
    flavor_ram: ">=512"
    image: Ubuntu-18.04
  cloudstack:
    size: Micro
    image: Linux Ubuntu 19.04 64-bit
    disk: 10
  vultr:
    os: Ubuntu 19.04 x64
    size: 1024 MB RAM,25 GB SSD,1.00 TB BW
  local:

fail_hint:
  - Sorry, but something went wrong!
  - Please check the troubleshooting guide.
  - https://trailofbits.github.io/algo/troubleshooting.html

booleans_map:
  Y: true
  y: true

BIN (binary file)


automation/roles/algo/files/supervisor_algo_refresh_users_monitor.conf (+5 -0)

@@ -0,0 +1,5 @@

[program:algo_refresh_users_monitor]
stdout_logfile = /dev/null
stderr_logfile = /dev/null
command=/usr/local/bin/algo_refresh_users_monitor.sh
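
With this file dropped into /etc/supervisor/conf.d, supervisor keeps the monitor script running and restarts it if it exits; a standard way to check on it (not shown in this commit) is:

    supervisorctl status algo_refresh_users_monitor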

automation/roles/algo/tasks/algo_firewall.yml (+89 -0)

@@ -0,0 +1,89 @@
# Insert additional firewall rules to allow required services to function
- name: Allow HTTP
  iptables:
    chain: INPUT
    action: insert
    rule_num: 5
    protocol: tcp
    destination_port: 80
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTP connections
  become: yes

- name: Allow HTTPS
  iptables:
    chain: INPUT
    action: insert
    rule_num: 6
    protocol: tcp
    destination_port: 443
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTPS connections
  become: yes

- name: Allow admin HTTPS on port {{ ssl_port }}
  iptables:
    chain: INPUT
    action: insert
    rule_num: 7
    protocol: tcp
    destination_port: "{{ ssl_port }}"
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new admin SSL connections
  become: yes

- name: "Allow DNS over TCP on private port"
  iptables:
    chain: INPUT
    action: insert
    rule_num: 8
    protocol: tcp
    destination_port: "{{ dns_port }}"
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new TCP DNS connections on private port
  become: yes

- name: "Allow DNS over UDP on private port"
  iptables:
    chain: INPUT
    action: insert
    rule_num: 9
    protocol: udp
    destination_port: "{{ dns_port }}"
    jump: ACCEPT
    comment: Accept new UDP DNS connections on private port
  become: yes

- name: "Redirect DNS TCP port"
  iptables:
    chain: PREROUTING
    table: nat
    action: insert
    rule_num: 1
    protocol: tcp
    destination_port: "53"
    jump: REDIRECT
    to_ports: "{{ dns_port }}"
    comment: Redirect DNS TCP connections
  become: yes

- name: "Redirect DNS UDP port"
  iptables:
    chain: PREROUTING
    table: nat
    action: insert
    rule_num: 2
    protocol: udp
    destination_port: "53"
    jump: REDIRECT
    to_ports: "{{ dns_port }}"
    comment: Redirect DNS UDP connections
  become: yes
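
Each task above corresponds to a plain iptables rule; for example, the two DNS redirect tasks are roughly equivalent to the following commands, assuming `dns_port` renders to 5300 (an illustrative value):

    # NAT-table PREROUTING rules rewriting inbound port 53 to the private DNS port
    iptables -t nat -I PREROUTING 1 -p tcp --dport 53 -j REDIRECT --to-ports 5300 -m comment --comment "Redirect DNS TCP connections"
    iptables -t nat -I PREROUTING 2 -p udp --dport 53 -j REDIRECT --to-ports 5300 -m comment --comment "Redirect DNS UDP connections"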

automation/roles/algo/tasks/main.yml (+46 -0)

@@ -0,0 +1,46 @@
- name: Unzip algo master.zip
unarchive:
src: master.zip
dest: /root/ansible/roles/algo

- name: Write algo config.cfg.hbs
copy:
src: config.cfg.hbs
dest: /root/ansible/roles/algo/algo-master/config.cfg.hbs

- name: Install algo_refresh_users script and monitor
copy:
src: "{{ item }}"
dest: "/usr/local/bin/{{ item }}"
owner: root
group: root
mode: 0500
with_items:
- "algo_refresh_users.sh"
- "algo_refresh_users_monitor.sh"

- name: Install algo_refresh_users_monitor supervisor conf file
copy:
src: supervisor_algo_refresh_users_monitor.conf
dest: /etc/supervisor/conf.d/algo_refresh_users_monitor.conf

- name: Write install_algo.sh template
template:
src: install_algo.sh.j2
dest: /root/ansible/roles/algo/algo-master/install_algo.sh
owner: root
group: root
mode: 0500

# Don't setup algo when in restore mode, bubble_restore_monitor.sh will set it up after the CA key has been restored
- name: Run algo playbook to install algo
shell: /root/ansible/roles/algo/algo-master/install_algo.sh
when: restore_key is not defined

# Don't start algo_refresh_users_monitor when in restore mode; bubble_restore_monitor.sh will start it after algo is installed
- name: Reload supervisor and restart algo_refresh_users_monitor
shell: bash -c "supervisorctl reload && sleep 5s && supervisorctl restart algo_refresh_users_monitor"
when: restore_key is not defined

- include: algo_firewall.yml


+40 -0  automation/roles/algo/templates/install_algo.sh.j2

@@ -0,0 +1,40 @@
#!/bin/bash

function die {
echo 1>&2 "${1}"
exit 1
}

ALGO_BASE="$(cd $(dirname $0) && pwd)"
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD"

cd ${ALGO_BASE}

if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then
die "No ${ALGO_BASE}/config.cfg.hbs found"
fi

java -cp /home/bubble/current/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg"

python3 -m virtualenv --python="$(command -v python3)" .env \
&& source .env/bin/activate \
&& python3 -m pip install -U pip virtualenv \
&& python3 -m pip install -r requirements.txt \
&& ansible-playbook main.yml --skip-tags debug \
-e "ca_password_file=${CA_PASS_FILE}
ca_password_file_owner=bubble
provider=local
server=localhost
store_cakey=true
ondemand_cellular=false
ondemand_wifi=false
store_pki=true
dns_adblocking=false
ssh_tunneling=false
endpoint={{ endpoint }}
server_name={{ server_name }}" || die "Error installing algo"

# Archive configs in a place that the BackupService can pick them up
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs"
cd /home/bubble && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home"
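
The archive written here is what bubble_restore_monitor.sh (further down) unpacks on a restored node; its contents can be listed with (illustrative):

tar tzf /home/bubble/.BUBBLE_ALGO_CONFIGS.tgz | head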

+2 -0  automation/roles/bubble/files/bsql.sh

@@ -0,0 +1,2 @@
#!/bin/bash
PGPASSWORD="$(cat /home/bubble/.BUBBLE_PG_PASSWORD)" psql -U bubble -h 127.0.0.1 bubble "${@}"
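
Since bsql.sh forwards its arguments to psql, it works both interactively and in pipelines; the restore monitor below uses the pipeline form. For example, run as the bubble user once the database is initialized (illustrative):

echo 'SELECT count(*) FROM account' | bsql.sh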

+140 -0  automation/roles/bubble/files/bubble_restore_monitor.sh

@@ -0,0 +1,140 @@
#!/bin/bash

BUBBLE_HOME="/home/bubble"
RESTORE_MARKER="${BUBBLE_HOME}/.restore"
RESTORE_RUN_MARKER="${BUBBLE_HOME}/.restore_run"

SELF_NODE="self_node.json"
BUBBLE_SELF_NODE="${BUBBLE_HOME}/${SELF_NODE}"

ADMIN_PORT=${1:?no admin port provided}
TIMEOUT=${2:-3600} # 60 minutes default timeout

LOG=/tmp/bubble.restore.log

function die {
echo 1>&2 "${1}"
log "${1}"
exit 1
}

function log {
echo "${1}" >> ${LOG}
}

START=$(date +%s)
while [[ ! -f "${RESTORE_MARKER}" ]] ; do
sleep 5
if [[ $(expr $(date +%s) - ${START}) -gt ${TIMEOUT} ]] ; then
break
fi
done

if [[ ! -f "${RESTORE_MARKER}" ]] ; then
die "Restore marker was never created: ${RESTORE_MARKER}"
fi

# Was a restore already attempted? Only one attempt is allowed; start another restore (with a new node) to try again.
if [[ -f ${RESTORE_RUN_MARKER} ]] ; then
die "Restore was already attempted, cannot attempt again"
fi
touch ${RESTORE_RUN_MARKER}

# Ensure there is exactly one self_node.json in the backup; otherwise we may have more than one backup and cannot determine which to restore.
SELF_NODE_COUNT=$(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}" | wc -l | tr -d ' ')
if [[ ${SELF_NODE_COUNT} -eq 0 ]] ; then
die "Cannot restore, restore base could not be determined (no ${SELF_NODE} found under ${BUBBLE_HOME}/restore)"
elif [[ ${SELF_NODE_COUNT} -gt 1 ]] ; then
die "Cannot restore, restore base could not be determined (multiple ${SELF_NODE} files found under ${BUBBLE_HOME}/restore): $(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}")"
fi

# set RESTORE_BASE, ensure it is set
RESTORE_BASE=$(dirname $(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}" | head -1))
if [[ -z "${RESTORE_BASE}" ]] ; then
die "Cannot restore, restore base could not be determined (no ${SELF_NODE} found under ${BUBBLE_HOME}/restore)"
fi

# stop bubble service
log "Stopping bubble service"
supervisorctl stop bubble

# stop mitmdump service
log "Stopping mitmproxy service"
supervisorctl stop mitmdump

# restore bubble.jar
log "Restoring bubble.jar"
cp ${RESTORE_BASE}/bubble.jar ${BUBBLE_HOME}/current/bubble.jar

# set wasRestored flag in self_node.json
log "Adding wasRestored=true to ${SELF_NODE}"
TEMP_SELF=$(mktemp /tmp/self_node.XXXXXXX.json)
cat ${BUBBLE_SELF_NODE} | jq '.wasRestored = true' > ${TEMP_SELF} || die "Error adding 'wasRestored' flag to ${SELF_NODE}"
cat ${TEMP_SELF} > ${BUBBLE_SELF_NODE} || die "Error rewriting ${SELF_NODE}"

log "Setting ownership of json files to bubble user"
chown bubble ${BUBBLE_HOME}/*.json || die "Error changing ownership of json files to bubble user"

# restore dot files
log "Restoring bubble dotfiles"
cp ${RESTORE_BASE}/dotfiles/.BUBBLE_* ${BUBBLE_HOME}/ || die "Error restoring dotfiles"

# restore mitm configs
log "Restoring mitm certs"
cp -R ${RESTORE_BASE}/mitm_certs ${BUBBLE_HOME}/ || die "Error restoring mitm certs"

# drop and recreate database from backup (but preserve bubble_node and bubble_node_key for current node)
log "Restoring bubble database"
cp ${RESTORE_BASE}/bubble.sql.gz ${BUBBLE_HOME}/sql/ \
&& chown -R bubble ${BUBBLE_HOME}/sql \
&& chgrp -R postgres ${BUBBLE_HOME}/sql \
&& chmod 550 ${BUBBLE_HOME}/sql \
&& chmod 440 ${BUBBLE_HOME}/sql/* || die "Error restoring bubble database archive"
su - postgres bash -c "cd ${BUBBLE_HOME}/sql && full_reset_db.sh drop" || die "Error restoring database"

# Remove old keys
log "Removing node keys"
echo "DELETE FROM bubble_node_key" | bsql.sh

# restore local storage
log "Restoring bubble LocalStorage"
rm -rf ${BUBBLE_HOME}/.bubble_local_storage/* && rsync -ac ${RESTORE_BASE}/LocalStorage/* ${BUBBLE_HOME}/.bubble_local_storage/ || die "Error restoring LocalStorage"

# flush redis
log "Flushing redis"
echo "FLUSHALL" | redis-cli || die "Error flushing redis"

# restore algo configs
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
if [[ ! -f ${CONFIGS_BACKUP} ]] ; then
log "Warning: Algo VPN configs backup not found: ${CONFIGS_BACKUP}, not installing algo"
else
ALGO_BASE=/root/ansible/roles/algo/algo-master
if [[ ! -d ${ALGO_BASE} ]] ; then
die "Error restoring Algo VPN: directory ${ALGO_BASE} not found"
fi
cd ${ALGO_BASE} && tar xzf ${CONFIGS_BACKUP} || die "Error restoring algo VPN configs"

# install/configure algo
${ALGO_BASE}/install_algo.sh || die "Error configuring or installing algo VPN"

# ensure user monitor is running
supervisorctl restart algo_refresh_users_monitor
fi

# restart mitm proxy service
log "Restarting mitmproxy"
supervisorctl restart mitmdump

# restart bubble service
log "Restore complete: restarting bubble API"
supervisorctl restart bubble

# verify service is running OK
log "Pausing for a bit, then verifying bubble server has successfully restarted after restore"
sleep 60
curl https://$(hostname):${ADMIN_PORT}/api/.bubble || log "Error restarting bubble server"

# remove restore markers, we are done
log "Cleaning up temp files"
rm -f ${RESTORE_MARKER} ${RESTORE_RUN_MARKER}
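
Note that the monitor never creates the restore marker itself; it blocks until another process (presumably the bubble API, when a restore is requested with restore_key) creates it. For manually exercising the wait loop (illustrative):

touch /home/bubble/.restore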

+30 -0  automation/roles/bubble/files/bubble_role.json

@@ -0,0 +1,30 @@
{
"name": "bubble-0.0.1",
"priority": 300,
"template": true,
"config": [
{"name": "ansible_python_interpreter", "value": "/usr/bin/python3"},
{"name": "node_uuid", "value": "[[node.uuid]]"},
{"name": "network_uuid", "value": "[[node.network]]"},
{"name": "admin_port", "value": "[[node.adminPort]]"},
{"name": "ssl_port", "value": "[[configuration.nginxPort]]"},
{"name": "sage_node", "value": "[[sageNode]]"},
{"name": "install_type", "value": "[[installType]]"},
{"name": "default_locale", "value": "[[network.locale]]"},
{"name": "bubble_version", "value": "[[configuration.version]]"},
{"name": "bubble_host", "value": "[[node.fqdn]]"},
{"name": "admin_user", "value": "[[node.user]]"},
{"name": "db_encoding", "value": "UTF-8"},
{"name": "db_locale", "value": "en_US"},
{"name": "db_user", "value": "bubble"},
{"name": "db_name", "value": "bubble"},
{"name": "db_key", "value": "[[dbEncryptionKey]]"},
{"name": "letsencrypt_email", "value": "[[configuration.letsencryptEmail]]"},
{"name": "is_fork", "value": "[[fork]]"},
{"name": "restore_key", "value": "[[restoreKey]]"},
{"name": "restore_timeout", "value": "[[restoreTimeoutSeconds]]"},
{"name": "test_mode", "value": "[[testMode]]"}
],
"optionalConfigNames": ["restore_key", "restore_timeout"],
"tgzB64": ""
}

+140 -0  automation/roles/bubble/files/init_bubble_db.sh

@@ -0,0 +1,140 @@
#!/bin/bash

echo "$@" > /tmp/init.args

LOG=/tmp/bubble.err.log

function die {
echo 1>&2 "${1}"
log "${1}"
exit 1
}

function log {
echo "${1}" >> ${LOG}
}

export LANG="en_US.UTF-8"
export LANGUAGE="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
export LC_NUMERIC="en_US.UTF-8"
export LC_TIME="en_US.UTF-8"
export LC_COLLATE="en_US.UTF-8"
export LC_MONETARY="en_US.UTF-8"
export LC_MESSAGES="en_US.UTF-8"
export LC_PAPER="en_US.UTF-8"
export LC_NAME="en_US.UTF-8"
export LC_ADDRESS="en_US.UTF-8"
export LC_TELEPHONE="en_US.UTF-8"
export LC_MEASUREMENT="en_US.UTF-8"
export LC_IDENTIFICATION="en_US.UTF-8"
export LC_ALL=en_US.UTF-8

if [[ "$(whoami)" != "postgres" ]] ; then
echo "Must be run as postgres user"
exit 1
fi

DB_NAME=${1:?no db name provided}
DB_USER=${2:?no db user provided}
IS_FORK=${3:?no fork argument provided}
INSTALL_MODE=${4:?no install mode provided}
DROP_AND_RECREATE=${5}

BUBBLE_HOME=/home/bubble
BUBBLE_JAR=/home/bubble/current/bubble.jar
if [[ ! -f ${BUBBLE_JAR} ]] ; then
die "Bubble jar not found: ${BUBBLE_JAR}"
fi

function user_exists {
username="${1}"
num_users="$(echo "select count(*) from pg_user where usename='${username}'" | psql -qt | egrep -v '^$')"
if [[ -z "${num_users}" || ${num_users} -eq 0 ]] ; then
echo "0"
else
echo "1"
fi
}

function db_exists {
dbname="${1}"
num_dbs="$(echo "select count(*) from pg_database where datname='${dbname}'" | psql -qt | egrep -v '^$')"
if [[ -z "${num_dbs}" || ${num_dbs} -eq 0 ]] ; then
echo "0"
else
echo "1"
fi
}

function count_table_rows {
dbname="${1}"
tname="${2}"
num_rows="$(echo "select count(*) from ${tname}" | psql -qt ${dbname} | egrep -v '^$')"
if [[ -z "${num_rows}" ]] ; then
die "count_table_rows: error counting rows for table ${tname}"
fi
echo ${num_rows}
}

if [[ ! -z "${DROP_AND_RECREATE}" && "${DROP_AND_RECREATE}" == "drop" ]] ; then
dropdb ${DB_NAME} || echo "error dropping DB ${DB_NAME} (will continue)"
dropuser ${DB_USER} || echo "error dropping DB user ${DB_USER} (will continue)"
uuid > ${BUBBLE_HOME}/.BUBBLE_PG_PASSWORD
fi

if [[ $(user_exists ${DB_USER}) -eq 0 ]] ; then
log "Creating user ${DB_USER}"
if [[ "$(echo ${IS_FORK} | tr [[:upper:]] [[:lower:]])" == "true" ]] ; then
createuser --createdb --no-createrole --no-superuser --no-replication ${DB_USER} || die "Error creating user"
else
createuser --no-createdb --no-createrole --no-superuser --no-replication ${DB_USER} || die "Error creating user"
fi
DB_PASS="$(cat ${BUBBLE_HOME}/.BUBBLE_PG_PASSWORD)"
echo "ALTER USER bubble WITH PASSWORD '${DB_PASS}'" | psql || die "Error setting user password"
fi

if [[ $(db_exists ${DB_NAME}) -eq 0 ]] ; then
log "Creating DB ${DB_NAME}"
createdb --encoding=UTF-8 ${DB_NAME} || die "Error creating DB"
fi

if [[ $(count_table_rows ${DB_NAME} account 2> /dev/null) -eq 0 ]] ; then
TEMP_DB="${DB_NAME}_$(uuid | tr -d '-')"
log "Creating tempDB ${TEMP_DB}"
createdb --encoding=UTF-8 ${TEMP_DB} || die "Error creating temp DB"
log "Populating tempDB ${TEMP_DB} with bubble.sq.gz"
zcat /home/bubble/sql/bubble.sql.gz | psql ${TEMP_DB} || die "Error writing database schema/data"
DB_PASS="$(cat ${BUBBLE_HOME}/.BUBBLE_PG_PASSWORD)"  # re-read here: the user-creation branch above may have been skipped
DB_KEY="$(cat ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY)"
TO_KEY="$(uuid)"
if [[ -z "${TO_KEY}" ]] ; then
dropdb ${TEMP_DB}
die "${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY does not exist or is empty"
fi
log "Dumping schema from ${TEMP_DB} -> ${DB_NAME}"
pg_dump --schema-only ${TEMP_DB} | psql ${DB_NAME}
# log "Rekeying: fromKey=${DB_KEY}, toKey=${TO_KEY}"
java -cp ${BUBBLE_JAR} bubble.main.RekeyDatabaseMain \
--jar ${BUBBLE_JAR} \
--db-user ${DB_USER} \
--db-password "${DB_PASS}" \
--from-db ${TEMP_DB} \
--from-key "${DB_KEY}" \
--to-db ${DB_NAME} \
--to-key "${TO_KEY}" 2>&1 || (dropdb ${TEMP_DB} ; die "Error re-keying database")
# --to-key "${TO_KEY}" 2>&1 | tee -a ${LOG} || (dropdb ${TEMP_DB} ; die "Error re-keying database")
log "Rekey successful, dropping ${TEMP_DB}"
dropdb ${TEMP_DB}
log "Saving ${TO_KEY} to ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY"
echo -n "${TO_KEY}" > ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY
fi

echo "DELETE FROM bubble_node_key WHERE node IN (SELECT uuid FROM bubble_node WHERE ip4='127.0.0.1' OR ip4='' OR ip4 IS NULL)" | psql ${DB_NAME} \
|| die "Error removing bubble_node_keys with remote_host=127.0.0.1"
echo "DELETE FROM bubble_node WHERE ip4='127.0.0.1'" | psql ${DB_NAME} \
|| die "Error removing bubble_nodes with ip4=127.0.0.1"

if [[ "${INSTALL_MODE}" == "node" ]] ; then
echo "UPDATE account SET locked=true" | psql ${DB_NAME} \
|| die "Error locking accounts"
fi
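
Putting the arguments together: a full drop-and-recreate, as wrapped by the full_reset_db.sh template further down, amounts to the following (db name and user come from bubble_role.json; the fork and install-mode values here are illustrative):

sudo -H -u postgres bash -c 'cd ~bubble/sql && init_bubble_db.sh bubble bubble false node drop'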

+61 -0  automation/roles/bubble/files/init_roles.sh

@@ -0,0 +1,61 @@
#!/bin/bash

SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)

LOG=/tmp/$(basename ${0}).log

function die {
echo 1>&2 "${1}"
log "${1}"
exit 1
}

function log {
echo "${1}" | tee -a ${LOG}
}

if [[ $(whoami) != "bubble" ]] ; then
if [[ $(whoami) == "root" ]] ; then
sudo -H -u bubble ${0}
exit $?
fi
die "${0} must be run as bubble"
fi

if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
if [[ -f "${HOME}/bubble/current/bubble.env" ]] ; then
LOCALSTORAGE_BASE_DIR=$(cat "${HOME}/bubble/current/bubble.env" | grep -v '^#' | grep LOCALSTORAGE_BASE_DIR | awk -F '=' '{print $2}' | tr -d ' ')
fi
fi
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
log "LOCALSTORAGE_BASE_DIR env var not defined, using ${HOME}/.bubble_local_storage"
LOCALSTORAGE_BASE_DIR="${HOME}/.bubble_local_storage"
fi

if [[ -z "${BUBBLE_JAR}" ]] ; then
if [[ -f "${HOME}/current/bubble.jar" ]] ; then
BUBBLE_JAR="${HOME}/current/bubble.jar"
fi
fi
if [[ -z "${BUBBLE_JAR}" ]] ; then
die "BUBBLE_JAR env var not set and no jar file found"
fi

ROLE_DIR="${HOME}/role_tgz"
if [[ ! -d "${ROLE_DIR}" ]] ; then
die "role_tgz dir not found: ${ROLE_DIR}"
fi

NETWORK_UUID="$(cat ${HOME}/self_node.json | jq -r .network)"
find ${ROLE_DIR} -type f -name "*.tgz" | while read role_tgz ; do
path="automation/roles/$(basename ${role_tgz})"
dest="${LOCALSTORAGE_BASE_DIR}/${NETWORK_UUID}/${path}"
if [[ ! -f ${dest} ]] ; then
mkdir -p $(dirname ${dest}) || die "Error creating destination directory"
cp ${role_tgz} ${dest} || die "Error copying role archive"
log "installed role ${role_tgz} -> ${dest}"
else
log "role already installed ${role_tgz} -> ${dest}"
fi
done

+99 -0  automation/roles/bubble/files/pg_hba.conf

@@ -0,0 +1,99 @@
# PostgreSQL Client Authentication Configuration File
# ===================================================
#
# Refer to the "Client Authentication" section in the PostgreSQL
# documentation for a complete description of this file. A short
# synopsis follows.
#
# This file controls: which hosts are allowed to connect, how clients
# are authenticated, which PostgreSQL user names they can use, which
# databases they can access. Records take one of these forms:
#
# local DATABASE USER METHOD [OPTIONS]
# host DATABASE USER ADDRESS METHOD [OPTIONS]
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
#
# (The uppercase items must be replaced by actual values.)
#
# The first field is the connection type: "local" is a Unix-domain
# socket, "host" is either a plain or SSL-encrypted TCP/IP socket,
# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a
# plain TCP/IP socket.
#
# DATABASE can be "all", "sameuser", "samerole", "replication", a
# database name, or a comma-separated list thereof. The "all"
# keyword does not match "replication". Access to replication
# must be enabled in a separate record (see example below).
#
# USER can be "all", a user name, a group name prefixed with "+", or a
# comma-separated list thereof. In both the DATABASE and USER fields
# you can also write a file name prefixed with "@" to include names
# from a separate file.
#
# ADDRESS specifies the set of hosts the record matches. It can be a
# host name, or it is made up of an IP address and a CIDR mask that is
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
# specifies the number of significant bits in the mask. A host name
# that starts with a dot (.) matches a suffix of the actual host name.
# Alternatively, you can write an IP address and netmask in separate
# columns to specify the set of hosts. Instead of a CIDR-address, you
# can write "samehost" to match any of the server's own IP addresses,
# or "samenet" to match any address in any subnet that the server is
# directly connected to.
#
# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi",
# "ident", "peer", "pam", "ldap", "radius" or "cert". Note that
# "password" sends passwords in clear text; "md5" is preferred since
# it sends encrypted passwords.
#
# OPTIONS are a set of options for the authentication in the format
# NAME=VALUE. The available options depend on the different
# authentication methods -- refer to the "Client Authentication"
# section in the documentation for a list of which options are
# available for which authentication methods.
#
# Database and user names containing spaces, commas, quotes and other
# special characters must be quoted. Quoting one of the keywords
# "all", "sameuser", "samerole" or "replication" makes the name lose
# its special character, and just match a database or username with
# that name.
#
# This file is read on server startup and when the postmaster receives
# a SIGHUP signal. If you edit the file on a running system, you have
# to SIGHUP the postmaster for the changes to take effect. You can
# use "pg_ctl reload" to do that.

# Put your actual configuration here
# ----------------------------------
#
# If you want to allow non-local connections, you need to add more
# "host" records. In that case you will also need to make PostgreSQL
# listen on a non-local interface via the listen_addresses
# configuration parameter, or via the -i or -h command line switches.




# DO NOT DISABLE!
# If you change this first entry you will need to make sure that the
# database superuser can access the database using some other method.
# Noninteractive access to all databases is required during automatic
# maintenance (custom daily cronjobs, replication, and similar tasks).
#
# Database administrative login by Unix domain socket
local all postgres peer

# TYPE DATABASE USER ADDRESS METHOD

# "local" is for Unix domain socket connections only
local all all peer
# IPv4 local connections:
host all all 127.0.0.1/32 md5
# IPv6 local connections:
host all all ::1/128 md5
# Allow replication connections from localhost, by a user with the
# replication privilege.
#local replication postgres peer
#host replication postgres 127.0.0.1/32 md5
#host replication postgres ::1/128 md5

+614 -0  automation/roles/bubble/files/postgresql.conf

@@ -0,0 +1,614 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload". Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: kB = kilobytes Time units: ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

data_directory = '/var/lib/postgresql/10/main' # use data in another directory
# (change requires restart)
hba_file = '/etc/postgresql/10/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
ident_file = '/etc/postgresql/10/main/pg_ident.conf' # ident configuration file
# (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
external_pid_file = '/var/run/postgresql/10-main.pid' # write an extra PID file
# (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

listen_addresses = 'localhost,127.0.0.1' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
port = 5432 # (change requires restart)
max_connections = 197 # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)

# - Security and Authentication -

#authentication_timeout = 1min # 1s-600s
ssl = true # (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
# (change requires restart)
#ssl_prefer_server_ciphers = on # (change requires restart)
#ssl_ecdh_curve = 'prime256v1' # (change requires restart)
#ssl_renegotiation_limit = 512MB # amount of data between renegotiations
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' # (change requires restart)
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' # (change requires restart)
#ssl_ca_file = '' # (change requires restart)
#ssl_crl_file = '' # (change requires restart)
#password_encryption = on
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off

# - TCP Keepalives -
# see "man 7 tcp" for details

#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# use none to disable dynamic shared memory

# - Disk -

#temp_file_limit = -1 # limits per-session temp file space
# in kB, or -1 for no limit

# - Kernel Resource Usage -

#max_files_per_process = 1000 # min 25
# (change requires restart)
#shared_preload_libraries = '' # (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0 # 0-100 milliseconds
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round

# - Asynchronous Behavior -

#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8


#------------------------------------------------------------------------------
# WRITE AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

#wal_level = minimal # minimal, archive, hot_standby, or logical
# (change requires restart)
#fsync = on # turns forced synchronization on or off
#synchronous_commit = on # synchronization level;
# off, local, remote_write, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds

#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000

# - Checkpoints -

#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each
#checkpoint_timeout = 5min # range 30s-1h
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_warning = 30s # 0 disables

# - Archiving -

#archive_mode = off # allows archiving to be done
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Server(s) -

# Set these on the master and on any standby that will send replication data.

#max_wal_senders = 0 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables

#max_replication_slots = 0 # max number of replication slots
# (change requires restart)

# - Master Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = '' # standby servers that provide sync rep
# comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a master server.

#hot_standby = off # "on" allows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on

# - Planner Cost Constants -

#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#effective_cache_size = 4GB

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses


#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'pg_log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'

# This is only relevant when logging to eventlog (win32):
#event_source = 'PostgreSQL'

# - When to Log -

#client_min_messages = notice # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# log
# notice
# warning
# error

#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic

#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)

#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds


# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
log_line_prefix = '%t [%p-%l] %q%u@%d ' # special values:
# %a = application name
# %u = user name
# %d = database name
# %r = remote host and port
# %h = remote host
# %p = process ID
# %t = timestamp without milliseconds
# %m = timestamp with milliseconds
# %i = command tag
# %e = SQL state
# %c = session ID
# %l = session line number
# %s = session start timestamp
# %v = virtual transaction ID
# %x = transaction ID (0 if none)
# %q = stop here in non-session
# processes
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
#log_statement = 'none' # none, ddl, mod, all
#log_temp_files = -1 # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'localtime'


#------------------------------------------------------------------------------
# RUNTIME STATISTICS
#------------------------------------------------------------------------------

# - Query/Index Statistics Collector -

#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#update_process_title = on
stats_temp_directory = '/var/run/postgresql/10-main.pg_stat_tmp'


# - Statistics Monitoring -

#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
#------------------------------------------------------------------------------

#autovacuum = on # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min # time between autovacuum runs
#autovacuum_vacuum_threshold = 50 # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50 # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit


#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#search_path = '"$user",public' # schema names
#default_tablespace = '' # a tablespace name, '' uses the default
#temp_tablespaces = '' # a list of tablespace names, '' uses
# only default tablespace
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0 # in milliseconds, 0 is disabled
#lock_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#bytea_output = 'hex' # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'

# - Locale and Formatting -

datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'localtime'
#timezone_abbreviations = 'Default' # Select the set of available time zone
# abbreviations. Currently, there are
# Default
# Australia (historical usage)
# India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 0 # min -15, max 3
#client_encoding = sql_ascii # actually, defaults to database
# encoding

# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.UTF-8' # locale for system error message
# strings
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting
lc_numeric = 'en_US.UTF-8' # locale for number formatting
lc_time = 'en_US.UTF-8' # locale for time formatting

# default configuration for text search
default_text_search_config = 'pg_catalog.english'

# - Other Defaults -

#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''


#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64 # min 10
# (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#max_pred_locks_per_transaction = 64 # min 10
# (change requires restart)


#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on

# - Other Platforms and Clients -

#transform_null_equals = off


#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------

#exit_on_error = off # terminate session on any error?
#restart_after_crash = on # reinitialize after backend crash?


#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf.

#include_dir = 'conf.d' # include files ending in '.conf' from
# directory 'conf.d'
#include_if_exists = 'exists.conf' # include file only if it exists
#include = 'special.conf' # include file


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here

+9 -0  automation/roles/bubble/files/random_password.sh

@@ -0,0 +1,9 @@
#!/bin/bash

file=${1:?no file provided}
owner=${2:?no owner provided}
group=${3:?no group provided}

if [[ ! -f ${file} ]] ; then
touch ${file} && chmod 660 ${file} && chown ${owner} ${file} && chgrp ${group} ${file} && uuid | tr -d '\n' > ${file}
fi
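
The script is idempotent: an existing file is left untouched, so secrets survive re-runs of the role. The bubble tasks below invoke it once per key file, e.g.:

random_password.sh /home/bubble/.BUBBLE_PG_PASSWORD bubble postgres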

+6 -0  automation/roles/bubble/handlers/main.yml

@@ -0,0 +1,6 @@
---
- name: Start Pgsql
service: name=postgresql state=started

- name: Restart Pgsql
service: name=postgresql state=restarted

+135 -0  automation/roles/bubble/tasks/main.yml

@@ -0,0 +1,135 @@
- name: Install OpenJDK 11 JRE (headless), redis, uuid and jq
apt:
name: [ 'openjdk-11-jre-headless', 'redis', 'uuid', 'jq' ]
state: present
update_cache: yes

- import_tasks: postgresql.yml

- name: Create bubble user
user:
name: bubble
comment: bubble user
shell: /bin/bash
system: yes
home: /home/bubble

- name: Create bubble API version dir
file:
path: /home/bubble/bubble_{{ bubble_version }}
owner: bubble
group: bubble
mode: 0555
state: directory

- name: Create bubble logs dir
file:
path: /home/bubble/logs
owner: bubble
group: root
mode: 0770
state: directory

- name: Create bubble ansible archives dir
file:
path: /home/bubble/role_tgz
owner: bubble
group: root
mode: 0770
state: directory

- name: Install bubble jar
copy:
src: "{{ item }}"
dest: /home/bubble/bubble_{{ bubble_version }}/{{ item }}
owner: bubble
group: bubble
mode: 0444
with_items:
- bubble.jar

- name: Install bubble self_node.json, sage_node.json and sage_key.json
copy:
src: "{{ item }}"
dest: /home/bubble/{{ item }}
owner: bubble
group: bubble
mode: 0600
with_items:
- self_node.json
- sage_node.json
- sage_key.json

- name: Install helper scripts
copy:
src: "{{ item }}"
dest: "/usr/local/bin/{{ item }}"
owner: root
group: root
mode: 0555
with_items:
- "bsql.sh"
- "random_password.sh"
- "init_roles.sh"

- name: Install standard bubble scripts
copy:
src: "{{ item }}"
dest: "/usr/local/bin/"
owner: root
group: root
mode: 0555
with_fileglob:
- "scripts/*"

- name: Install helper template scripts
template:
src: "{{ item.src }}"
dest: "/usr/local/bin/{{ item.dest }}"
owner: root
group: root
mode: 0555
with_items:
- { src: "full_reset_db.sh.j2", dest: "full_reset_db.sh" }
- { src: "snapshot_ansible.sh.j2", dest: "snapshot_ansible.sh" }

- name: Generate keys
shell: random_password.sh /home/bubble/{{ item.file }} bubble {{ item.group }}
with_items:
- { file: '.BUBBLE_REDIS_ENCRYPTION_KEY', group: root }
- { file: '.BUBBLE_DB_ENCRYPTION_KEY', group: postgres } # postgres user needs access to DB key
- { file: '.BUBBLE_PG_PASSWORD', group: postgres } # postgres user needs access to DB password

- name: Write DB key
shell: echo -n "{{ db_key }}" > /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY

- name: Copy ansible role archives
copy:
src: 'role_tgz/'
dest: '/home/bubble/role_tgz/'
owner: bubble
group: bubble
mode: 0400

- name: Write bubble env file
template:
src: bubble.env.j2
dest: /home/bubble/bubble_{{ bubble_version }}/bubble.env
owner: bubble
group: bubble
mode: 0400

- name: Link current version to the one we just installed
file:
src: /home/bubble/bubble_{{ bubble_version }}
dest: /home/bubble/current
owner: bubble
group: bubble
state: link

- name: Initialize local storage with role archive
shell: init_roles.sh

- import_tasks: postgresql_data.yml

- import_tasks: restore.yml

+16 -0  automation/roles/bubble/tasks/postgresql.yml

@@ -0,0 +1,16 @@
- name: Install PostgreSQL
apt:
name: [ 'postgresql-10', 'libpq-dev', 'python-psycopg2' ]
state: present
update_cache: yes

- name: Install PostgreSQL files
copy:
src: "{{ item }}"
dest: /etc/postgresql/10/main/
owner: postgres
group: postgres
mode: 0400
with_items:
- postgresql.conf
- pg_hba.conf

+30 -0  automation/roles/bubble/tasks/postgresql_data.yml

@@ -0,0 +1,30 @@
- name: Create bubble SQL dir
file:
path: /home/bubble/sql
owner: bubble
group: postgres
mode: 0550
state: directory

- name: Install SQL schema files
copy:
src: "{{ item }}"
dest: /home/bubble/sql/{{ item }}
owner: bubble
group: postgres
mode: 0440
with_items:
- "bubble.sql.gz"

- name: Install DB initializer
copy:
src: "{{ item }}"
dest: "/usr/local/bin/{{ item }}"
owner: root
group: postgres
mode: 0550
with_items:
- init_bubble_db.sh

- name: Populate database
shell: sudo -H -u postgres bash -c "cd && full_reset_db.sh"

+15 -0  automation/roles/bubble/tasks/restore.yml

@@ -0,0 +1,15 @@

- name: Install restore helper scripts
copy:
src: '{{ item }}'
dest: "/usr/local/bin/{{ item }}"
owner: root
group: postgres
mode: 0550
with_items:
- "bubble_restore_monitor.sh"
when: restore_key is defined

- name: Start restore monitor
shell: bash -c 'nohup /usr/local/bin/bubble_restore_monitor.sh {{ admin_port }} {{ restore_timeout }} > /dev/null &'
when: restore_key is defined

+9 -0  automation/roles/bubble/templates/bubble.env.j2

@@ -0,0 +1,9 @@
export PUBLIC_BASE_URI=https://{{ bubble_host }}:{{ ssl_port }}
export SELF_NODE={{ node_uuid }}
export SAGE_NODE={{ sage_node }}
export BUBBLE_JAR=/home/bubble/current/bubble.jar
export BUBBLE_RUN_DNS=true
export LETSENCRYPT_EMAIL={{ letsencrypt_email }}
export BUBBLE_SERVER_PORT={{ admin_port }}
export BUBBLE_TEST_MODE={{ test_mode }}
export BUBBLE_DEFAULT_LOCALE={{ default_locale }}

+19 -0  automation/roles/bubble/templates/full_reset_db.sh.j2

@@ -0,0 +1,19 @@
#!/bin/bash

function die {
echo 1>&2 "${1}"
exit 1
}

if [[ $(whoami) == "root" ]] ; then
su - postgres ${0} ${@}
exit $?
fi

if [[ $(whoami) != "postgres" ]] ; then
die "${0} : must be run as postgres user"
fi

cd ~bubble/sql \
&& init_bubble_db.sh {{ db_name }} {{ db_user }} {{ is_fork }} {{ install_type }} ${1} \
|| die "error reinitializing database"

+34 -0  automation/roles/bubble/templates/snapshot_ansible.sh.j2

@@ -0,0 +1,34 @@
#!/bin/bash

SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)

LOG=/tmp/$(basename ${0}).log

function die {
echo 1>&2 "${1}"
log "${1}"
exit 1
}

function log {
echo "${1}" | tee -a ${LOG}
}

if [[ $(whoami) != "{{ admin_user }}" ]] ; then
if [[ $(whoami) == "root" ]] ; then
sudo -H -u "{{ admin_user }}" ${0}
exit $?
fi
die "${0} must be run as {{ admin_user }}"
fi

ANSIBLE_USER_HOME=$(cd ~{{ admin_user }} && pwd)  # tilde must be unquoted for expansion

ANSIBLE_SNAPSHOT="/home/bubble/ansible.tgz"

cd ${ANSIBLE_USER_HOME} \
&& tar czf ${ANSIBLE_SNAPSHOT} ./ansible \
&& chmod 400 ${ANSIBLE_SNAPSHOT} \
&& chown bubble ${ANSIBLE_SNAPSHOT} \
|| die "Error creating ansible snapshot"

+11 -0  automation/roles/bubble_finalizer/files/bubble_role.json

@@ -0,0 +1,11 @@
{
"name": "bubble_finalizer-0.0.1",
"priority": 9999999,
"template": true,
"config": [
{"name": "restore_key", "value": "[[restoreKey]]"},
{"name": "install_type", "value": "[[installType]]"}
],
"optionalConfigNames": ["restore_key"],
"tgzB64": ""
}

+30 -0  automation/roles/bubble_finalizer/files/copy_certs_to_bubble.sh

@@ -0,0 +1,30 @@
#!/bin/bash

function die {
echo 1>&2 "${1}"
exit 1
}

MITM_CERTS=/home/mitmproxy/.mitmproxy
chown -R mitmproxy ${MITM_CERTS} || die "Error setting ownership on ${MITM_CERTS}"
chgrp -R root ${MITM_CERTS} || die "Error setting group on ${MITM_CERTS}"
chmod 750 ${MITM_CERTS} || die "Error setting permissions on ${MITM_CERTS}"
chmod -R 440 ${MITM_CERTS}/* || die "Error setting permissions on ${MITM_CERTS} files"

CERTS_DIR=/home/bubble/cacerts
MITM_BASE_NAME="bubble-ca"
mkdir -p ${CERTS_DIR} || die "Error creating cacerts dir"
cp ${MITM_CERTS}/${MITM_BASE_NAME}-cert.pem ${CERTS_DIR} || die "Error copying pem cert"
cp ${MITM_CERTS}/${MITM_BASE_NAME}-cert.pem.crt ${CERTS_DIR}/${MITM_BASE_NAME}-cert.crt || die "Error copying crt cert"
cp ${MITM_CERTS}/${MITM_BASE_NAME}-cert.p12 ${CERTS_DIR} || die "Error copying p12 cert"
cp ${MITM_CERTS}/${MITM_BASE_NAME}-cert.cer ${CERTS_DIR} || die "Error copying cer cert"
chown -R bubble ${CERTS_DIR} || die "Error setting ownership on cacerts dir"
chmod 755 ${CERTS_DIR} || die "Error setting permissions on ${CERTS_DIR}"
chmod -R 444 ${CERTS_DIR}/* || die "Error setting permissions on ${CERTS_DIR} files"

CERTS_BACKUP=/home/bubble/mitm_certs
mkdir -p ${CERTS_BACKUP} || die "Error creating mitm_certs dir"
chmod 700 ${CERTS_BACKUP} || die "Error setting permissions on mitm_certs dir"
cp ${MITM_CERTS}/* ${CERTS_BACKUP} || die "Error backing up mitm_certs"
chmod -R 400 ${CERTS_BACKUP}/* || die "Error setting permissions on mitm_certs backup"
chown -R bubble ${CERTS_BACKUP} || die "Error setting ownership of mitm_certs dir"

+9 -0  automation/roles/bubble_finalizer/files/supervisor_bubble.conf

@@ -0,0 +1,9 @@

[program:bubble]
stdout_logfile = /home/bubble/logs/bubble-out.log
stderr_logfile = /home/bubble/logs/bubble-err.log
command=sudo -u bubble bash -c "/usr/bin/java \
-Djava.net.preferIPv4Stack=true -Xmx512m \
-cp /home/bubble/current/bubble.jar \
bubble.server.BubbleServer \
/home/bubble/current/bubble.env"
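
Supervisor only picks up a new conf.d file after a reread; a minimal sketch of activating and checking the program with standard supervisorctl commands:

supervisorctl reread && supervisorctl update
supervisorctl status bubble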

+40 -0  automation/roles/bubble_finalizer/tasks/main.yml

@@ -0,0 +1,40 @@
- name: Snapshot ansible roles
shell: snapshot_ansible.sh

- name: Touch first-time setup file
shell: su - bubble bash -c "if [[ ! -f /home/bubble/first_time_marker ]] ; then echo -n install > /home/bubble/first_time_marker ; fi"
when: restore_key is not defined

- name: Touch first-time setup file (restore)
shell: su - bubble bash -c "if [[ ! -f /home/bubble/first_time_marker ]] ; then echo -n restore > /home/bubble/first_time_marker ; fi"
when: restore_key is defined

- name: Install mitmproxy CA cert in local CA store
shell: install_cert.sh /home/mitmproxy/.mitmproxy/bubble-ca-cert.pem 600
when: install_type == 'node'

- name: Install copy_certs_to_bubble.sh helper
copy:
src: "copy_certs_to_bubble.sh"
dest: /usr/local/bin/copy_certs_to_bubble.sh
owner: bubble
group: root
mode: 0550
when: install_type == 'node'

- name: Install mitmproxy public certs in bubble dir
shell: /usr/local/bin/copy_certs_to_bubble.sh
when: install_type == 'node'

- name: Install bubble supervisor conf file
copy:
src: supervisor_bubble.conf
dest: /etc/supervisor/conf.d/bubble.conf

# We cannot receive notifications until nginx is running, so start bubble API as the very last step
- name: Ensure bubble is started
supervisorctl:
name: '{{ item }}'
state: restarted
with_items:
- bubble

+9 -0  automation/roles/common/files/bubble_role.json

@@ -0,0 +1,9 @@
{
"name": "common-0.0.1",
"priority": 100,
"template": true,
"config": [
{"name": "hostname", "value": "[[node.fqdn]]"}
],
"tgzB64": ""
}

+24 -0  automation/roles/common/files/dot-screenrc

@@ -0,0 +1,24 @@
bindkey -d ^J command
bind "\011" windows
escape ^Jj
defescape ^Jj
bind o focus
bind , prev
bind . next
bind k remove

bind u focus down
bind i focus up
bind t focus top
bind b focus bottom

vbell off

# hardstatus on
# hardstatus alwayslastline

defscrollback 50000

# Make it a login shell -- needed for rvm (Ruby)
shell -${SHELL}
term xterm-256color

+36 -0  automation/roles/common/tasks/main.yml

@@ -0,0 +1,36 @@
- name: Set hostname to {{ hostname }}
hostname:
name: '{{ hostname }}'

- name: Update packages
apt:
update_cache: yes

- name: Remove ufw
apt:
name: ufw
state: absent
update_cache: yes

- name: Upgrade packages
apt:
update_cache: yes
upgrade: yes

- name: Install common packages
apt:
name: [ 'ntp', 'unzip', 'safe-rm', 'supervisor', 'emacs-nox', 'screen', 'xtail', 'fail2ban' ]
state: present
update_cache: yes

- name: Install screenrc file
copy:
src: dot-screenrc
dest: /root/.screenrc

- name: Start common services
service:
name: '{{ item }}'
state: restarted
with_items:
- fail2ban

+5 -0  automation/roles/firewall/defaults/main.yml

@@ -0,0 +1,5 @@
fw_enable_http: true
fw_enable_admin: true
fw_enable_dns: true
fw_enable_ssh: true
primary_net_interface: ens3

+180 -0  automation/roles/firewall/files/bubble_peer_manager.py

@@ -0,0 +1,180 @@
#!/usr/bin/python3

import json
import logging
import os
import sys
import time
import subprocess

logging.basicConfig(level=logging.INFO)  # attach a default handler so logger output is not dropped
logger = logging.getLogger(__name__)

EMPTY_PEERS = {'peers': [], 'ports': []}


class PeerPort(object):
def __init__(self, port):
if ':' in port:
self.proto = port[0:port.index(':')]
self.port = port[port.index(':') + 1:]
else:
self.proto = 'tcp'
self.port = port

def __str__(self):
return self.proto + ':' + self.port


def find_peers(port):
out = subprocess.run(['iptables', '-vnL', 'INPUT'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
peers = []
for line in out.stdout.decode('utf-8').split('\n'):
line = line.strip()
if len(line) == 0 or line.startswith('Chain ') or line.startswith('pkts '):
continue
parts = line.split()  # one token per column; handles the multi-space padding in iptables output
packets = parts[0]
bytes = parts[1]
target = parts[2]
proto = parts[3]
if proto != port.proto:
continue
opt = parts[4]
iface_in = parts[5]
iface_out = parts[6]
source = parts[7]
if source == '0.0.0.0/0':
continue
dest = parts[8]
if parts[9] != port.proto:
continue
if parts[10].startswith('dpt:'):
dest_port = int(parts[10][len('dpt:'):])
if dest_port == int(port.port):
peers.append(source)
return peers


def add_peers(peers, port):
out = subprocess.run(['iptables', '-vnL', 'INPUT'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
lines = out.stdout.decode('utf-8').split('\n')
insert_at = len(lines) - 2
if insert_at < 2:
raise ValueError('add_peers: insert_at was < 2: '+str(insert_at))
for peer in peers:
logger.info("add_peers: alllowing peer: " + peer + " on port " + port)
out = subprocess.run(['iptables', '-I', 'INPUT', str(insert_at),
'-p', port.proto, '-s', peer + '/32',
'--dport', port.port, '-j', 'ACCEPT'])
logger.info("add_peers: allowed peer: " + peer + " on port " + port)


def remove_peers(peers, port):
for peer in peers:
remove_peer(peer, port)


def remove_peer(peer, port):
out = subprocess.run(['iptables', '-vnL', 'INPUT'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
index = 0
for line in out.stdout.decode('utf-8').split('\n'):
line = line.strip()
if len(line) == 0 or line.startswith('Chain ') or line.startswith('pkts '):
continue
index = index + 1
parts = line.split()  # one token per column; handles the multi-space padding in iptables output
packets = parts[0]
bytes = parts[1]
target = parts[2]
proto = parts[3]
if proto != port.proto:
continue
opt = parts[4]
iface_in = parts[5]
iface_out = parts[6]
source = parts[7]
if not source.startswith(peer+'/32'):
continue
dest = parts[8]
if parts[9] != port.proto:
continue
if parts[10].startswith('dpt:'):
dest_port = int(parts[10][len('dpt:'):])
if dest_port == int(port.port):
logger.info("remove_peer: removing peer: " + peer + " on port " + str(port))
out = subprocess.run(['iptables', '-D', 'INPUT', str(index)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return True
return False


class BubblePeers(object):

def __init__(self, peer_path, self_path):
self.peer_path = peer_path
if os.path.exists(peer_path):
self.last_modified = os.path.getmtime(self.peer_path)
else:
self.last_modified = 0

self.last_update = None
self.peers = []
self.ports = []

self.self_path = self_path
self.self_node = {}

def load_peers(self):
if os.path.exists(self.peer_path):
with open(self.peer_path) as f:
val = json.load(f)
else:
val = EMPTY_PEERS
self.peers = val['peers']
self.ports = []
for port in val['ports']:
self.ports.append(PeerPort(port))

def load_self(self):
if os.path.exists(self.self_path):
with open(self.self_path) as f:
self.self_node = json.load(f)

def monitor(self):
self.load_peers()
self.load_self()
if os.path.exists(self.peer_path):
self.last_modified = os.path.getmtime(self.peer_path)
if self.last_update is None or self.last_update < self.last_modified:
self.load_peers()
for port in self.ports:
peers_on_port = find_peers(port)
peers_to_remove = []
peers_to_add = []
for peer in peers_on_port:
if peer not in self.peers:
peers_to_remove.append(peer)
for peer in self.peers:
if peer not in peers_on_port:
peers_to_add.append(peer)
remove_peers(peers_to_remove, port)
add_peers(peers_to_add, port)


if __name__ == "__main__":
peers = BubblePeers(sys.argv[1], sys.argv[2])
interval = int(sys.argv[3])
try:
while True:
peers.monitor()
time.sleep(interval)
except Exception as e:
logger.error("Unexpected error: " + repr(e))
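
The supervisor conf below runs this script as root with a peers file, a self-node file, and a poll interval in seconds. A hand-rolled peers.json for testing might look like this (shape inferred from EMPTY_PEERS and PeerPort; addresses and ports illustrative):

cat > /home/bubble/peers.json <<'EOF'
{"peers": ["10.1.2.3", "10.1.2.4"], "ports": ["tcp:8090", "udp:53"]}
EOF
bubble_peer_manager.py /home/bubble/peers.json /home/bubble/self_node.json 60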

+12 -0  automation/roles/firewall/files/bubble_role.json

@@ -0,0 +1,12 @@
{
"name": "firewall-0.0.1",
"priority": 200,
"template": true,
"config": [
{"name": "ssl_port", "value": "[[configuration.nginxPort]]"},
{"name": "admin_port", "value": "[[node.adminPort]]"},
{"name": "dns_port", "value": "[[configuration.dnsPort]]"},
{"name": "install_type", "value": "[[installType]]"}
],
"tgzB64": ""
}

+5 -0  automation/roles/firewall/files/supervisor_bubble_peer_manager.conf

@@ -0,0 +1,5 @@

[program:bubble_peer_manager]
stdout_logfile = /var/log/bubble_peer_manager-out.log
stderr_logfile = /var/log/bubble_peer_manager-err.log
command=bash -c "/usr/local/bin/bubble_peer_manager.py /home/bubble/peers.json /home/bubble/self_node.json 60"

+118 -0  automation/roles/firewall/tasks/main.yml

@@ -0,0 +1,118 @@
- name: Install firewall packages
apt:
name: [ 'haveged', 'iptables-persistent', 'netfilter-persistent', 'autossh' ]
state: present
update_cache: yes

- name: Flush iptables
iptables:
flush: true
become: yes

- name: Flush INPUT chain
iptables:
chain: INPUT
flush: yes
become: yes

- name: Flush OUTPUT chain
iptables:
chain: OUTPUT
flush: yes
become: yes

- name: Flush iptables nat table
iptables:
flush: yes
table: nat
become: yes

- name: Flush iptables mangle table
iptables:
flush: true
table: mangle
become: yes

- name: Flush iptables raw table
iptables:
flush: true
table: raw
become: yes

- name: Flush OUTPUT chain NAT table
iptables:
chain: OUTPUT
table: nat
flush: yes
become: yes

- name: Flush FORWARD chain
iptables:
chain: FORWARD
flush: yes
become: yes

- name: Flush PREROUTING chain NAT Table
iptables:
chain: PREROUTING
table: nat
flush: yes
become: yes

- name: Delete ufw chains
command: "bash -c 'iptables -F {{ item }} && iptables -X {{ item }} || echo \"chain not found: {{ item }}\"'"
with_items:
- ufw-after-forward
- ufw-after-input
- ufw-after-logging-forward
- ufw-after-logging-input
- ufw-after-logging-output
- ufw-after-output
- ufw-before-forward
- ufw-before-input
- ufw-before-logging-forward
- ufw-before-logging-input
- ufw-before-logging-output
- ufw-before-output
- ufw-reject-forward
- ufw-reject-input
- ufw-reject-output
- ufw-track-forward
- ufw-track-input
- ufw-track-output

- name: Install port manager
copy:
src: bubble_peer_manager.py
dest: /usr/local/bin/bubble_peer_manager.py
owner: root
group: root
mode: 0555
when: fw_enable_admin

- name: Install supervisor conf file for port manager
copy:
src: supervisor_bubble_peer_manager.conf
dest: /etc/supervisor/conf.d/bubble_peer_manager.conf
when: fw_enable_admin

- include: sage.yml
when: install_type == 'sage'

- name: Create /etc/iptables directory
file:
path: /etc/iptables
state: directory

- name: Save iptables v4 rules
shell: iptables-save > /etc/iptables/rules.v4
become: yes

- name: Save iptables v6 rules
shell: ip6tables-save > /etc/iptables/rules.v6
become: yes

- supervisorctl:
name: bubble_peer_manager
state: restarted
when: fw_enable_admin

+ 104
- 0
automation/roles/firewall/tasks/port_redirect.yml View File

@@ -0,0 +1,104 @@
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes

- sysctl:
name: net.ipv4.conf.all.forwarding
value: 1
sysctl_set: yes

- sysctl:
name: net.ipv6.conf.all.forwarding
value: 1
sysctl_set: yes

- name: "Allow {{ service_name }} over TCP"
iptables:
chain: INPUT
protocol: tcp
destination_port: "{{ public_port }}"
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new TCP {{ service_name }} connections
become: yes

- name: "Allow {{ service_name }} over TCP on private port"
iptables:
chain: INPUT
protocol: tcp
destination_port: "{{ private_port }}"
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new TCP {{ service_name }} connections on private port
become: yes

- name: "Allow {{ service_name }} over UDP"
iptables:
chain: INPUT
protocol: udp
destination_port: "{{ public_port }}"
jump: ACCEPT
comment: Accept new UDP {{ service_name }} connections
when: allow_udp is defined
become: yes

- name: "Allow {{ service_name }} over UDP on private port"
iptables:
chain: INPUT
protocol: udp
destination_port: "{{ private_port }}"
jump: ACCEPT
comment: Accept new UDP {{ service_name }} connections on private port
when: allow_udp is defined
become: yes

- name: "Redirect {{ service_name }} TCP port"
iptables:
chain: PREROUTING
table: nat
protocol: tcp
destination_port: "{{ public_port }}"
jump: REDIRECT
to_ports: "{{ private_port }}"
comment: Redirect {{ service_name }} TCP connections
become: yes

- name: Redirect {{ service_name }} UDP port
iptables:
chain: PREROUTING
table: nat
protocol: udp
destination_port: "{{ public_port }}"
jump: REDIRECT
to_ports: "{{ private_port }}"
comment: Redirect {{ service_name }} UDP connections
when: allow_udp is defined
become: yes

- name: "Redirect local {{ service_name }} TCP port"
iptables:
chain: OUTPUT
table: nat
protocol: tcp
destination_port: "{{ public_port }}"
destination: 127.0.0.1
jump: REDIRECT
to_ports: "{{ private_port }}"
comment: Redirect local {{ service_name }} TCP connections
become: yes

- name: "Redirect local {{ service_name }} UDP port"
iptables:
chain: OUTPUT
table: nat
protocol: udp
destination_port: "{{ public_port }}"
destination: 127.0.0.1
jump: REDIRECT
to_ports: "{{ private_port }}"
comment: Redirect local {{ service_name }} UDP connections
when: allow_udp is defined
become: yes
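
# For reference, the PREROUTING redirect above is equivalent to running, e.g.
# (illustrative values: public_port=53, private_port=5353):
#   iptables -t nat -A PREROUTING -p tcp --dport 53 -j REDIRECT --to-ports 5353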

+ 95
- 0
automation/roles/firewall/tasks/sage.yml View File

@@ -0,0 +1,95 @@
- name: Allow all from local
iptables:
chain: INPUT
in_interface: lo
jump: ACCEPT
comment: Allow all from local
become: yes

- name: Allow related and established connections
iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
comment: Allow related and established connections
become: yes

- include: port_redirect.yml
public_port="53"
private_port="{{ dns_port }}"
service_name="DNS"
allow_udp="yes"
when: fw_enable_dns

- name: Allow SSH
iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections
become: yes
when: fw_enable_ssh

- name: Allow HTTP
iptables:
chain: INPUT
protocol: tcp
destination_port: 80
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new HTTP connections
become: yes
when: fw_enable_http

- name: Allow HTTPS
iptables:
chain: INPUT
protocol: tcp
destination_port: 443
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new HTTPS connections
become: yes
when: fw_enable_http

- name: Allow admin HTTPS on port {{ ssl_port }}
iptables:
chain: INPUT
protocol: tcp
destination_port: "{{ ssl_port }}"
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new admin HTTPS connections
when: fw_enable_admin
become: yes

- name: Drop everything else
iptables:
chain: INPUT
jump: DROP
comment: Drop anything else
become: yes

- name: Create /etc/iptables directory
file:
path: /etc/iptables
state: directory

- name: Save iptables v4 rules
shell: iptables-save > /etc/iptables/rules.v4
become: yes

- name: Save iptables v6 rules
shell: ip6tables-save > /etc/iptables/rules.v6
become: yes

- supervisorctl:
name: bubble_peer_manager
state: restarted
when: fw_enable_admin

+ 23
- 0
automation/roles/mitmproxy/files/bubble_api.py View File

@@ -0,0 +1,23 @@
from bubble_config import bubble_network, bubble_port
from mitmproxy import ctx
import requests

HEADER_BUBBLE_MATCHERS='X-Bubble-Matchers'

# todo: cache responses by remote_addr+host for a limited time (1 minute?)
def bubble_matchers (remote_addr, flow, host):
headers = {'X-Forwarded-For': remote_addr}
try:
data = {
'fqdn': host,
'uri': flow.request.path,
'userAgent': flow.request.headers['User-Agent'],
'remoteAddr': flow.client_conn.address[0]
}
response = requests.post('http://127.0.0.1:'+bubble_port+'/api/filter/matchers', headers=headers, data=data)
if response.ok:
return response.json()
        ctx.log.warn('bubble_matchers returned '+str(response.status_code)+', returning empty matchers array')
except Exception as e:
ctx.log.warn('bubble_matchers API call failed: '+repr(e))
return []
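
# A minimal sketch of the response caching suggested in the todo above; not
# part of the original module. Assumes a per-process dict keyed on
# (remote_addr, host) with a 60-second TTL is acceptable.
import time

_matcher_cache = {}
MATCHER_CACHE_TTL = 60  # seconds

def cached_bubble_matchers(remote_addr, flow, host):
    key = (remote_addr, host)
    cached = _matcher_cache.get(key)
    if cached is not None and time.time() - cached[0] < MATCHER_CACHE_TTL:
        return cached[1]
    matchers = bubble_matchers(remote_addr, flow, host)
    _matcher_cache[key] = (time.time(), matchers)
    return matchers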

+ 48
- 0
automation/roles/mitmproxy/files/bubble_modify.py View File

@@ -0,0 +1,48 @@
"""
This inline script modifies a streamed response.
If you do not need streaming, see the modify_response_body example.
Be aware that content replacement isn't trivial:
- If the transfer encoding isn't chunked, you cannot simply change the content length.
- If you want to replace all occurrences of "foobar", make sure to catch the cases
where one chunk ends with [...]foo" and the next starts with "bar[...].
"""
import aiohttp
import urllib
from bubble_config import bubble_port
from bubble_api import HEADER_BUBBLE_MATCHERS


BUFFER_SIZE = 4096


def stream_data(stream):
    # drain whatever is buffered; read_nowait returns b'' when exhausted
    while True:
        chunk = stream.read_nowait(BUFFER_SIZE)
        if not chunk:
            break
        yield chunk
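
# Illustrative sketch (not used by this addon) of the chunk-boundary issue the
# module docstring describes: hold back len(needle)-1 bytes between chunks so a
# needle split across two chunks is still found and replaced.
def replace_across_chunks(chunks, needle, replacement):
    tail = b''
    keep = len(needle) - 1
    for chunk in chunks:
        data = (tail + chunk).replace(needle, replacement)
        # emit everything except the last `keep` bytes, which may begin a match
        tail = data[-keep:] if keep > 0 else b''
        yield data[:len(data) - len(tail)]
    if tail:
        yield tail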


async def fetch(session, url, chunks):
async with session.post(url, data=chunks) as response:
if response.status != 200:
raise RuntimeError("Error fetching "+url+", HTTP status "+str(response.status))
return stream_data(response.content)


async def filter_chunks_with_matchers(chunks, matchers):
rule_string = urllib.parse.quote_plus(matchers)
url = 'http://127.0.0.1:'+bubble_port+'/api/filter/apply/' + rule_string
async with aiohttp.ClientSession() as session:
        return await fetch(session, url, chunks)


def filter_with_matchers(matchers):
return lambda chunks: filter_chunks_with_matchers(chunks, matchers)


def responseheaders(flow):
    matchers = flow.request.headers.get(HEADER_BUBBLE_MATCHERS)
    if matchers:
        flow.response.stream = filter_with_matchers(matchers)

+ 12
- 0
automation/roles/mitmproxy/files/bubble_role.json View File

@@ -0,0 +1,12 @@
{
"name": "mitmproxy-0.0.1",
"priority": 600,
"template": true,
"install": "node",
"config": [
{"name": "admin_port", "value": "[[node.adminPort]]"},
{"name": "bubble_network", "value": "[[node.network]]"},
{"name": "mitm_port", "value": "[[configuration.mitmPort]]"}
],
"tgzB64": ""
}

+ 90
- 0
automation/roles/mitmproxy/files/dns_spoofing.py View File

@@ -0,0 +1,90 @@
"""
This script makes it possible to use mitmproxy in scenarios where IP spoofing
has been used to redirect connections to mitmproxy. The way this works is that
we rely on either the TLS Server Name Indication (SNI) or the Host header of the
HTTP request. Of course, this is not foolproof - if an HTTPS connection comes
without SNI, we don't know the actual target and cannot construct a certificate
that looks valid. Similarly, if there's no Host header or a spoofed Host header,
we're out of luck as well. Using transparent mode is the better option most of
the time.

Usage:
mitmproxy
-p 443
-s dns_spoofing.py
# Used as the target location if neither SNI nor host header are present.
--mode reverse:http://example.com/
# To avoid auto rewriting of host header by the reverse proxy target.
--set keep_host_header
mitmdump
-p 80
--mode reverse:http://localhost:443/

(Setting up a single proxy instance and using iptables to redirect to it
works as well)
"""
import json
import re
from bubble_api import bubble_matchers, HEADER_BUBBLE_MATCHERS
from mitmproxy import ctx

# This regex splits the host header into host and port.
# Handles the edge case of IPv6 addresses containing colons.
# https://bugzilla.mozilla.org/show_bug.cgi?id=45891
parse_host_header = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$")
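# e.g. "example.com:8080"  -> host="example.com",   port="8080"
#      "[2001:db8::1]:443" -> host="[2001:db8::1]", port="443"
#      "example.com"       -> host="example.com",   port=None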


class Rerouter:
@staticmethod
def get_matcher_ids(flow, host):
if host is None:
return None

remote_addr = flow.client_conn.address[0]
        if isinstance(host, bytes):
            host = host.decode('utf-8')
matchers = bubble_matchers(remote_addr, flow, host)
if not matchers:
print("no matchers for remote_addr/host: "+remote_addr+'/'+host)
return None
matcher_ids = []
for m in matchers:
ctx.log.info('get_matcher_ids: checking for match of path='+flow.request.path+' against regex: '+m['regex'])
if re.match(m['regex'], flow.request.path):
ctx.log.info('get_matcher_ids: rule matched, adding rule: '+m['rule'])
matcher_ids.append(m['uuid'])
return matcher_ids

def request(self, flow):
if flow.client_conn.tls_established:
flow.request.scheme = "https"
sni = flow.client_conn.connection.get_servername()
port = 443
else:
flow.request.scheme = "http"
sni = None
port = 80

        host_header = flow.request.host_header
        m = parse_host_header.match(host_header) if host_header else None
if m:
host_header = m.group("host").strip("[]")
if m.group("port"):
port = int(m.group("port"))

# Determine if this request should be filtered
if sni or host_header:
matchers = self.get_matcher_ids(flow, sni or host_header)
if matchers:
ctx.log.info("dns_spoofing.request: found matchers: " + ' '.join(matchers))
flow.request.headers[HEADER_BUBBLE_MATCHERS] = json.dumps(matchers)
else:
ctx.log.info('dns_spoofing.request: no rules returned, passing thru...')
else:
ctx.log.info('dns_spoofing.request: no sni/host found, not applying rules to path: ' + flow.request.path)

flow.request.host_header = host_header
flow.request.host = sni or host_header
flow.request.port = port


addons = [Rerouter()]

+ 32
- 0
automation/roles/mitmproxy/files/install_cert.sh View File

@@ -0,0 +1,32 @@
#!/bin/bash

CERT="${1:?no cert provided}"
TIMEOUT=${2:-0}

function die {
echo 1>&2 "${1}"
exit 1
}

START=$(date +%s)
while [[ ! -f "${CERT}" ]] ; do
ELAPSED=$(expr $(date +%s) - ${START})
if [[ ${ELAPSED} -gt ${TIMEOUT} ]] ; then
break
fi
echo "Cert file does not exist, sleeping then rechecking: ${CERT}"
sleep 5s
done

if [[ ! -f "${CERT}" ]] ; then
die "Cert file does not exist: ${CERT}"
fi

if [[ "${CERT}" == *.pem || "${CERT}" == *.p12 ]] ; then
openssl x509 -in "${CERT}" -inform PEM -out "${CERT}.crt" || die "Error converting certificate"
CERT="${CERT}.crt"
fi

mkdir -p /usr/local/share/ca-certificates || die "Error ensuring CA certs directory exists"
cp "${CERT}" /usr/local/share/ca-certificates || die "Error installing certificate"
update-ca-certificates || die "Error updating CA certificates"
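
# Example invocation (assumes mitmproxy's default CA cert path; 120s timeout):
#   install_cert.sh /home/mitmproxy/.mitmproxy/mitmproxy-ca-cert.pem 120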

BIN
View File


+ 28
- 0
automation/roles/mitmproxy/files/reuse_bubble_mitm_certs.sh View File

@@ -0,0 +1,28 @@
#!/bin/bash

function die {
echo 1>&2 "${1}"
exit 1
}

CERTS_BACKUP=/home/bubble/mitm_certs
if [[ ! -d ${CERTS_BACKUP} ]] ; then
echo "No mitm_certs backup found, skipping restore"
exit 0
fi

MITM_CERTS=/home/mitmproxy/.mitmproxy
if [[ -d ${MITM_CERTS} ]] ; then
echo "Removing obsolete mitm certs: ${MITM_CERTS}"
rm -rf ${MITM_CERTS} || die "Error removing obsolete mitm certs"
if [[ -d ${MITM_CERTS} ]] ; then
die "Error removing obsolete mitm certs: dir still exists: ${MITM_CERTS}"
fi
fi

mkdir -p ${MITM_CERTS} || die "Error creating mitm certs dir: ${MITM_CERTS}"
chmod 750 ${MITM_CERTS} || die "Error setting permissions on mitm certs dir: ${MITM_CERTS}"
cp -R ${CERTS_BACKUP}/* ${MITM_CERTS}/ || die "Error restoring mitm certs"
chown -R mitmproxy ${MITM_CERTS} || die "Error changing ownership of ${MITM_CERTS}"
chgrp -R root ${MITM_CERTS} || die "Error changing group ownership of ${MITM_CERTS}"
chmod 440 ${MITM_CERTS}/* || die "Error setting permissions on mitm certs files"

+ 19
- 0
automation/roles/mitmproxy/files/run_mitmdump.sh View File

@@ -0,0 +1,19 @@
#!/bin/bash

MITM_PORT=${1:?no port provided}
cd /home/mitmproxy/mitmproxy && \
./dev.sh && . ./venv/bin/activate && \
mitmdump \
--listen-host 0.0.0.0 \
--listen-port ${MITM_PORT} \
--showhost \
--no-http2 \
--set block_global=false \
--set block_private=false \
--set termlog_verbosity=debug \
--set flow_detail=3 \
--set stream_large_bodies=5m \
--set keep_host_header \
-s ./dns_spoofing.py \
-s ./bubble_modify.py \
--mode reverse:https://example.com:443/

+ 88
- 0
automation/roles/mitmproxy/tasks/main.yml View File

@@ -0,0 +1,88 @@
- name: Install python3, pip, virtualenv and required dependencies
apt:
name: [ 'python3-pip', 'python3-venv', 'libc6-dev', 'libpython3-dev', 'g++', 'libffi-dev' ]
state: present
update_cache: yes

- name: Install supervisor conf file
template:
src: supervisor_mitmproxy.conf.j2
dest: /etc/supervisor/conf.d/mitmproxy.conf
owner: root
group: root
mode: 0400

- name: Create mitmproxy user
user:
name: mitmproxy
comment: mitmdump user
shell: /bin/bash
system: yes
home: /home/mitmproxy

- name: Create mitmproxy dir
file:
path: /home/mitmproxy/mitmproxy
owner: mitmproxy
group: mitmproxy
mode: 0755
state: directory

- name: Unzip mitmproxy.zip
unarchive:
src: mitmproxy.zip
dest: /home/mitmproxy/mitmproxy

- name: Copy mitmdump files
copy:
src: "{{ item }}"
dest: "/home/mitmproxy/mitmproxy/{{ item }}"
owner: mitmproxy
group: mitmproxy
mode: 0500
with_items:
- bubble_api.py
- dns_spoofing.py
- bubble_modify.py
- run_mitmdump.sh

- name: Set ownership of mitmproxy files
shell: chown -R mitmproxy /home/mitmproxy/mitmproxy

- name: Install cert helper scripts
copy:
src: "{{ item }}"
dest: "/usr/local/bin/{{ item }}"
owner: root
group: root
mode: 0500
with_items:
- install_cert.sh
- reuse_bubble_mitm_certs.sh

- name: Reuse bubble mitm certs if available
shell: reuse_bubble_mitm_certs.sh

- name: Copy bubble_config.py to /home/mitmproxy/mitmproxy
template:
src: bubble_config.py.j2
dest: /home/mitmproxy/mitmproxy/bubble_config.py
owner: mitmproxy
group: mitmproxy
mode: 0500

- name: Fix missing symlink for libstdc++
file:
src: /usr/lib/x86_64-linux-gnu/libstdc++.so.6
dest: /usr/lib/x86_64-linux-gnu/libstdc++.so
owner: root
group: root
state: link

- name: restart supervisord
service:
name: supervisor
enabled: yes
state: restarted

- import_tasks: route.yml

+ 55
- 0
automation/roles/mitmproxy/tasks/route.yml View File

@@ -0,0 +1,55 @@
- sysctl:
name: net.ipv4.ip_forward
value: 1
sysctl_set: yes
- sysctl:
name: net.ipv6.conf.all.forwarding
value: 1
sysctl_set: yes
- sysctl:
name: net.ipv4.conf.all.send_redirects
value: 0
sysctl_set: yes

- name: "Allow MITM private port"
iptables:
chain: INPUT
action: insert
rule_num: 10
protocol: tcp
destination_port: "{{ mitm_port }}"
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new local TCP DNS connections on private port
become: yes

- name: Route port 80 through mitmproxy
iptables:
table: nat
chain: PREROUTING
action: insert
rule_num: 1
protocol: tcp
destination_port: 80
jump: REDIRECT
to_ports: "{{ mitm_port }}"

- name: Route port 443 through mitmproxy
iptables:
table: nat
chain: PREROUTING
action: insert
rule_num: 2
protocol: tcp
destination_port: 443
jump: REDIRECT
to_ports: "{{ mitm_port }}"

- name: Save iptables v4 rules
shell: iptables-save > /etc/iptables/rules.v4
become: yes

- name: Save iptables v6 rules
shell: ip6tables-save > /etc/iptables/rules.v6
become: yes

+ 3
- 0
automation/roles/mitmproxy/templates/bubble_config.py.j2 View File

@@ -0,0 +1,3 @@
bubble_network = '{{ bubble_network }}'
bubble_port = '{{ admin_port }}'


+ 5
- 0
automation/roles/mitmproxy/templates/supervisor_mitmproxy.conf.j2 View File

@@ -0,0 +1,5 @@

[program:mitmdump]
stdout_logfile = /home/mitmproxy/mitmdump-out.log
stderr_logfile = /home/mitmproxy/mitmdump-err.log
command=sudo -u mitmproxy bash -c "/home/mitmproxy/mitmproxy/run_mitmdump.sh {{ mitm_port }}"

+ 34
- 0
automation/roles/nginx/defaults/main.yml View File

@@ -0,0 +1,34 @@
---
# user under which we run acme-tiny and owner of ssl_base_folder
acme_user: acme

# Path where we put all our SSL private keys and certificates
acme_ssl_base_folder: /var/ssl

# Path where we tell acme-tiny to put our challenges. Writable by acme_user and readable by
# www-data (nginx)
acme_challenges_folder_path: "{{ acme_ssl_base_folder }}/challenges"

# A list of *simple* domains to run acme_tiny on. Example: "www.example.com"
# For each of those domains, a CSR with a single domain will be created and processed.
acme_domain: "{{ server_name }}"

# A list of domains that are grouped in the same request.
# The generated CSR will use SAN openssl configuration when generating the CSR.
# See acme_tiny README.
# Each item in this list is a hash with the same structure as _acme_multi_domains_item.
acme_multi_domains: []

# Example: {'base_name': 'example.com', domains: ['example.com', 'www.example.com']}
# base_name will be the main name of the cert, that is, the name of the folder containing its
# keys and certs as well as the name of the nginx snippet.
# The first domain of "domains" is the "main" host of the resulting cert.
_acme_multi_domains_item:
base_name: ''
domains: []

# If set, we will fetch existing SSL data from our specified host name (from the inventory)
# **instead** of going through with ACME challenges. You should use this option when you're in the
# process of transferring a host and you want to avoid the downtime between the moment where you
# change your DNS record and the moment where you can proceed with a proper challenge.
acme_copy_from_hostname: ''

+ 12
- 0
automation/roles/nginx/files/bubble_role.json View File

@@ -0,0 +1,12 @@
{
"name": "nginx-0.0.1",
"priority": 500,
"template": true,
"config": [
{"name": "server_name", "value": "[[node.fqdn]]"},
{"name": "letsencrypt_email", "value": "[[configuration.letsencryptEmail]]"},
{"name": "ssl_port", "value": "[[configuration.nginxPort]]"},
{"name": "admin_port", "value": "[[node.adminPort]]"}
],
"tgzB64": ""
}

+ 13
- 0
automation/roles/nginx/files/init_certbot.sh View File

@@ -0,0 +1,13 @@
#!/bin/bash

LE_EMAIL="${1}"
SERVER_NAME="${2}"
if [[ $(find /etc/letsencrypt/accounts -type f -name regr.json | xargs grep -l \"${LE_EMAIL}\" | wc -l | tr -d ' ') -eq 0 ]] ; then
certbot register --agree-tos -m ${LE_EMAIL} --non-interactive
fi

if [[ ! -f /etc/letsencrypt/live/${SERVER_NAME}/fullchain.pem ]] ; then
certbot certonly --standalone --non-interactive -d ${SERVER_NAME}
else
certbot renew --standalone --non-interactive
fi
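
# Example invocation, as wired up by the nginx role's "Init certbot" task
# (values illustrative):
#   init_certbot.sh admin@example.com bubble.example.com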

+ 4
- 0
automation/roles/nginx/handlers/main.yml View File

@@ -0,0 +1,4 @@
---
- name: nginx reload
service: name=nginx state=reloaded


+ 68
- 0
automation/roles/nginx/tasks/main.yml View File

@@ -0,0 +1,68 @@
- name: Install OpenSSL, nginx and software-properties-common
apt:
name: [ 'openssl', 'nginx', 'software-properties-common' ]
state: present
update_cache: yes

- name: Enable Ubuntu universe repositories
apt_repository:
repo: "{{ item }}"
state: present
loop:
- "deb http://archive.ubuntu.com/ubuntu/ bionic universe"
- "deb http://archive.ubuntu.com/ubuntu/ bionic-updates universe"
- "deb http://security.ubuntu.com/ubuntu/ bionic-security universe"

- name: Enable ppa:certbot/certbot repository
apt_repository:
repo: ppa:certbot/certbot
state: present

- name: Update packages after adding new repositories
apt:
update_cache: yes

- name: Install certbot
apt:
name: [ 'certbot' ]
state: present
update_cache: yes

- name: Ensure nginx can read cert files
file:
dest: /etc/letsencrypt
group: www-data
recurse: yes

- name: Ensure nginx is stopped
service:
name: nginx
state: stopped

- name: Install init_certbot script
copy:
src: init_certbot.sh
dest: /usr/local/bin/init_certbot.sh
owner: root
group: root
mode: 0555

- name: Init certbot
shell: init_certbot.sh {{ letsencrypt_email }} {{ server_name }}

# see https://weakdh.org/sysadmin.html
- name: Create a strong dhparam.pem
shell: openssl dhparam -out /etc/nginx/dhparams.pem 2048
args:
creates: /etc/nginx/dhparams.pem

- name: Create dhparam nginx conf
template: src=stronger_dhparams.conf dest=/etc/nginx/conf.d/stronger_dhparams.conf

- include: site.yml
- meta: flush_handlers # nginx has to be restarted right now if it has to

- name: Ensure nginx is restarted
service:
name: nginx
state: restarted

+ 15
- 0
automation/roles/nginx/tasks/site.yml View File

@@ -0,0 +1,15 @@
- name: Disable default site
file:
path: /etc/nginx/sites-enabled/default
state: absent

- name: Create default site
template: src=site.conf.j2 dest=/etc/nginx/sites-available/{{ server_name }}.conf

- name: Symlink default site to site-enabled
file:
src: /etc/nginx/sites-available/{{ server_name }}.conf
dest: /etc/nginx/sites-enabled/{{ server_name }}.conf
owner: root
group: root
state: link

+ 13
- 0
automation/roles/nginx/templates/site.conf.j2 View File

@@ -0,0 +1,13 @@

server {
server_name {{ server_name }};
listen {{ ssl_port }} ssl http2;

ssl_certificate /etc/letsencrypt/live/{{ server_name }}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/{{ server_name }}/privkey.pem;

location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://127.0.0.1:{{ admin_port }}/;
}
}

+ 1
- 0
automation/roles/nginx/templates/stronger_dhparams.conf View File

@@ -0,0 +1 @@
ssl_dhparam /etc/nginx/dhparams.pem;

+ 84
- 0
bin/activate View File

@@ -0,0 +1,84 @@
#!/bin/bash
#
# Initial activation of a bubble server
#
# Usage: activate [domain] [dns] [storage]
#
# domain : a domain name. Must be listed in bubble-server/src/test/resources/models/system/bubbleDomain.json
# default value is bubblev.org
# dns : name of a CloudService of type 'dns'. Must be listed in bubble-server/src/test/resources/models/system/cloudService.json
# default is GoDaddyDns
# storage : name of a CloudService of type 'storage'. Must be listed in cloudService.json
# default is S3_US_Standard
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

if [[ -z "${BUBBLE_JAR}" ]] ; then
die "BUBBLE_JAR env var not set and no jar file found"
fi

MODELS_DIR="${SCRIPT_DIR}/../bubble-server/src/test/resources/models/system"
if [[ ! -d ${MODELS_DIR} ]] ; then
die "Models directory not found: ${MODELS_DIR}"
fi

ENV_FILE="${HOME}/.bubble.env"
if [[ ! -f ${ENV_FILE} ]] ; then
die "env file not found: ${ENV_FILE}"
fi

# Source env vars
. ${ENV_FILE}

DOMAIN=${1:-bubblev.org}
DOMAINS_FILE="${MODELS_DIR}/bubbleDomain.json"
DOMAIN_JSON=$(cat ${DOMAINS_FILE} | sed -e 's,// .*,,g' | grep -v "_subst" | java -cp ${BUBBLE_JAR} bubble.main.BubbleMain handlebars | jq ".[] | select(.name==\"${DOMAIN}\")")
if [[ -z "${DOMAIN_JSON}" ]] ; then
die "Domain ${DOMAIN} not found in ${DOMAINS_FILE}"
fi

DNS_CLOUD=${2:-GoDaddyDns}
CLOUDS_FILE="${MODELS_DIR}/cloudService.json"
DNS_JSON=$(cat ${CLOUDS_FILE} | sed -e 's,// .*,,g' | grep -v "_subst" | java -cp ${BUBBLE_JAR} bubble.main.BubbleMain handlebars | jq ".[] | select(.name==\"${DNS_CLOUD}\")")
if [[ -z "${DNS_JSON}" ]] ; then
die "DNS CloudService ${DNS_CLOUD} not found in ${CLOUDS_FILE}"
fi
CLOUD_TYPE="$(echo ${DNS_JSON} | jq -r .type)"
if [[ -z "${CLOUD_TYPE}" ]] ; then
die "DNS service ${DNS_CLOUD} has no type"
fi
if [[ "${CLOUD_TYPE}" != 'dns' ]] ; then
die "DNS service ${DNS_CLOUD} has wrong type (${CLOUD_TYPE}), expected: dns"
fi

STORAGE_CLOUD=${3:-S3_US_Standard}
STORAGE_JSON=$(cat ${CLOUDS_FILE} | sed -e 's,// .*,,g' | grep -v "_subst" | java -cp ${BUBBLE_JAR} bubble.main.BubbleMain handlebars | jq ".[] | select(.name==\"${STORAGE_CLOUD}\")")
if [[ -z "${STORAGE_JSON}" ]] ; then
die "Storage CloudService ${STORAGE_CLOUD} not found in ${CLOUDS_FILE}"
fi
CLOUD_TYPE="$(echo ${STORAGE_JSON} | jq -r .type)"
if [[ -z "${CLOUD_TYPE}" ]] ; then
die "Storage service ${DNS_CLOUD} has no type"
fi
if [[ "${CLOUD_TYPE}" != 'storage' ]] ; then
die "Storage service ${DNS_CLOUD} has wrong type (${CLOUD_TYPE}), expected: storage"
fi

exec ${SCRIPT_DIR}/bput auth/activate - --no-login <<EOF
{
"name": "${BUBBLE_USER}",
"password": "${BUBBLE_PASS}",
"networkName": "$(hostname -s)",
"domain": ${DOMAIN_JSON},
"dns": ${DNS_JSON},
"storage": ${STORAGE_JSON}
}
EOF

+ 16
- 0
bin/bdecrypt View File

@@ -0,0 +1,16 @@
#!/bin/bash
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

if [[ -z "${BUBBLE_DB_ENCRYPTION_KEY}" ]] ; then
if [[ -f "${HOME}/.BUBBLE_DB_ENCRYPTION_KEY" ]] ; then
BUBBLE_DB_ENCRYPTION_KEY=$(cat ${HOME}/.BUBBLE_DB_ENCRYPTION_KEY)
elif [[ -f "/home/bubble/.BUBBLE_DB_ENCRYPTION_KEY" ]] ; then
BUBBLE_DB_ENCRYPTION_KEY=$(cat /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY)
else
die "BUBBLE_DB_ENCRYPTION_KEY env var not defined, and no .BUBBLE_DB_ENCRYPTION_KEY file found"
fi
fi

BUBBLE_DB_ENCRYPTION_KEY=${BUBBLE_DB_ENCRYPTION_KEY} exec ${SCRIPT_DIR}/bubble crypt -f decrypt "${@}"

+ 27
- 0
bin/bdelete View File

@@ -0,0 +1,27 @@
#!/bin/bash
#
# Run an HTTP DELETE against the API
#
# Usage:
#
# bdelete path [options]
#
# path : an API path
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

exec ${SCRIPT_DIR}/bubble delete -U ${URL} ${@}

+ 16
- 0
bin/bencrypt View File

@@ -0,0 +1,16 @@
#!/bin/bash
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

if [[ -z "${BUBBLE_DB_ENCRYPTION_KEY}" ]] ; then
if [[ -f "${HOME}/.BUBBLE_DB_ENCRYPTION_KEY" ]] ; then
BUBBLE_DB_ENCRYPTION_KEY=$(cat ${HOME}/.BUBBLE_DB_ENCRYPTION_KEY)
elif [[ -f "/home/bubble/.BUBBLE_DB_ENCRYPTION_KEY" ]] ; then
BUBBLE_DB_ENCRYPTION_KEY=$(cat /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY)
else
die "BUBBLE_DB_ENCRYPTION_KEY env var not defined, and no .BUBBLE_DB_ENCRYPTION_KEY file found"
fi
fi

BUBBLE_DB_ENCRYPTION_KEY=${BUBBLE_DB_ENCRYPTION_KEY} exec ${SCRIPT_DIR}/bubble crypt -f encrypt "${@}"

+ 27
- 0
bin/bget View File

@@ -0,0 +1,27 @@
#!/bin/bash
#
# Run an HTTP GET against the API
#
# Usage:
#
# bget path [options]
#
# path : an API path
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

exec ${SCRIPT_DIR}/bubble get -U ${URL} ${@}

+ 38
- 0
bin/bmodel View File

@@ -0,0 +1,38 @@
#!/bin/bash
#
# Write a model to a bubble server
#
# Usage: bmodel [-u/--update-all] model-file
#
# -u or --update-all : if present, every entity that is not otherwise created will be updated
# model-file : a manifest.json file or a single model JSON file
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_SCRIPTS : location of run.sh script. Default is to assume it is in the same directory containing this script
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

UPDATE_OPT=""
if [[ ! -z "${1}" && ( "${1}" == "-u" || "${1}" == "--update-all" ) ]] ; then
UPDATE_OPT="--update-all"
shift
fi

MODEL="${1:?no manifest or model file specified}"
shift

is_manifest="$(basename ${MODEL} | grep "manifest" | wc -c | tr -d ' ')"
if [[ ${is_manifest} -gt 0 ]] ; then
MODEL_OPT="-m"
else
MODEL_OPT="-f"
fi

${SCRIPT_DIR}/bubble model ${UPDATE_OPT} ${MODEL_OPT} "${MODEL}"


+ 38
- 0
bin/bpatch View File

@@ -0,0 +1,38 @@
#!/bin/bash
#
# Usage:
#
# bpatch hostname [norestart]
#
# hostname : the hostname of the bubble node to update
# Usually you will have an entry in ~/.ssh/config to set the username and ssh key
# norestart : If present, do not restart the API server after updating the jar file
#
# Patch the bubble.jar on a remote node.
# This script only works if the only classes that have changed are in the bubble-server codebase.
# If other classes have changed, use bpatchfull
#
# You must install the JDK on the remote node first: apt install openjdk-11-jdk-headless
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

HOST=${1:?no host provided}
NO_RESTART=${2}

BUBBLE_SERVER_DIR="${SCRIPT_DIR}/../bubble-server"
if [[ ! -d "${BUBBLE_SERVER_DIR}" ]] ; then
die "bubble-server dir not found: ${BUBBLE_SERVER_DIR}"
fi
cd ${BUBBLE_SERVER_DIR}

mvn -DskipTests=true -Dcheckstyle.skip=true compile && rsync -avzc ./target/classes ${HOST}:/tmp/ | egrep -v '*/$' || die "Error recompiling classes"

if [[ ! -z "${NO_RESTART}" && "${NO_RESTART}" == "norestart" ]] ; then
echo "Patching but not restarting..."
ssh ${HOST} "cd /tmp && cp ~bubble/current/bubble.jar . && cd classes && jar uvf ../bubble.jar . | egrep -v '*/\(*' && cat ../bubble.jar > ~bubble/current/bubble.jar" || die "Error patching remote jar"
else
echo "Patching and restarting..."
ssh ${HOST} "cd /tmp && cp ~bubble/current/bubble.jar . && cd classes && jar uvf ../bubble.jar . | egrep -v '*/\(*' && cat ../bubble.jar > ~bubble/current/bubble.jar && supervisorctl restart bubble" || die "Error patching remote jar"
fi

+ 55
- 0
bin/bpatchfull View File

@@ -0,0 +1,55 @@
#!/bin/bash
#
# Usage:
#
# bpatchfull hostname [norestart]
#
# hostname : the hostname of the bubble node to update
# Usually you will have an entry in ~/.ssh/config to set the username and ssh key
# norestart : If present, do not restart the API server after updating the jar file
#
# Patch the bubble.jar on a remote node.
# This script updates the entire jar file, and takes a lot longer than bpatch
#
# You must install the JDK on the remote node first: apt install openjdk-11-jdk-headless
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

HOST=${1:?no host provided}
NO_RESTART=${2}

BUBBLE_SERVER_DIR="${SCRIPT_DIR}/../bubble-server"
if [[ ! -d "${BUBBLE_SERVER_DIR}" ]] ; then
die "bubble-server dir not found: ${BUBBLE_SERVER_DIR}"
fi
cd ${BUBBLE_SERVER_DIR}

ANY_JAR=$(find "./target" -type f -name "bubble*.jar" | head -1 | wc -l | tr -d ' ')
if [[ ${ANY_JAR} -eq 0 ]] ; then
ANY_CHANGES=1
else
ANY_CHANGES=$(find "./src/main" -type f -newer "$(find "./target" -type f -name "bubble*.jar" | head -1)" | wc -l | tr -d ' ')
fi
if [[ ${ANY_CHANGES} -eq 0 ]] ; then
echo "No changes, not repackaging jar"
scp ./target/bubble*.jar ${HOST}:/tmp/bubble.jar || die "Error copying file to remote host ${HOST}"
else
if [[ ${ANY_JAR} -eq 0 ]] ; then
echo "No bubble jar file found, rebuilding"
else
echo "Files changed, rebuilding bubble jar: "
find "./src/main" -type f -newer "$(find "./target" -type f -name "bubble*.jar" | head -1)"
fi
mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error packaging jar"
scp ./target/bubble*.jar ${HOST}:/tmp/bubble.jar || die "Error copying file to remote host ${HOST}"
fi

if [[ ! -z "${NO_RESTART}" && "${NO_RESTART}" == "norestart" ]] ; then
echo "Patching but not restarting..."
ssh ${HOST} "cat /tmp/bubble.jar > ~bubble/current/bubble.jar"
else
echo "Patching and restarting..."
ssh ${HOST} "cat /tmp/bubble.jar > ~bubble/current/bubble.jar && supervisorctl restart bubble"
fi

+ 45
- 0
bin/bpost View File

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Run an HTTP POST against the API
#
# Usage:
#
# bpost path [file] [options]
#
# path : an API path
# file : a JSON file to POST. To read from stdin, specify the file as -
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_ENTITY : the filename that contains the JSON to send in the POST. If empty, entity is read from stdin
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

REQUEST_JSON="${1}"
if [[ -z "${REQUEST_JSON}" ]] ; then
die "No request JSON file specified. Use - to read from stdin"
fi
if [[ "${REQUEST_JSON}" == "-" ]] ; then
echo 1>&2 "Reading request JSON from stdin"

elif [[ ! -f "${REQUEST_JSON}" && "${REQUEST_JSON}" != /dev/null ]] ; then
die "Request JSON file does not exist: ${REQUEST_JSON}"
fi
shift

if [[ "${REQUEST_JSON}" == "-" ]] ; then
exec ${SCRIPT_DIR}/bubble post -U ${URL} ${@}
else
cat ${REQUEST_JSON} | exec ${SCRIPT_DIR}/bubble post -U ${URL} ${@}
fi

+ 28
- 0
bin/bposte View File

@@ -0,0 +1,28 @@
#!/bin/bash
#
# Run an HTTP POST against the API with an empty request entity
#
# Usage:
#
# bposte path [options]
#
# path : an API path
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_ENTITY : the filename that contains the JSON to send in the POST. If empty, entity is read from stdin
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

cat /dev/null | exec ${SCRIPT_DIR}/bubble post -U ${URL} ${@}

+ 45
- 0
bin/bput View File

@@ -0,0 +1,45 @@
#!/bin/bash
#
# Run an HTTP PUT against the API
#
# Usage:
#
# bput path [file] [options]
#
# path : an API path
# file : a JSON file to PUT. To read from stdin, specify the file as -
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_ENTITY : the filename that contains the JSON to send in the PUT. If empty, entity is read from stdin
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

REQUEST_JSON="${1}"
if [[ -z "${REQUEST_JSON}" ]] ; then
die "No request JSON file specified. Use - to read from stdin"
fi
if [[ "${REQUEST_JSON}" == "-" ]] ; then
echo 1>&2 "Reading request JSON from stdin"

elif [[ ! -f "${REQUEST_JSON}" && "${REQUEST_JSON}" != /dev/null ]] ; then
die "Request JSON file does not exist: ${REQUEST_JSON}"
fi
shift

if [[ "${REQUEST_JSON}" == "-" ]] ; then
exec ${SCRIPT_DIR}/bubble put -U ${URL} ${@}
else
cat ${REQUEST_JSON} | exec ${SCRIPT_DIR}/bubble put -U ${URL} ${@}
fi

+ 28
- 0
bin/bpute View File

@@ -0,0 +1,28 @@
#!/bin/bash
#
# Run an HTTP PUT against the API with an empty request entity
#
# Usage:
#
# bpute path [options]
#
# path : an API path
# options : bscript options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
#
# Environment variables
#
# BUBBLE_ENTITY : the filename that contains the JSON to send in the PUT. If empty, entity is read from stdin
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
shift

cat /dev/null | exec ${SCRIPT_DIR}/bubble put -U ${URL} ${@}

+ 49
- 0
bin/bscript View File

@@ -0,0 +1,49 @@
#!/bin/bash
#
# Run a local JSON API script against a remote API server
#
# Usage:
#
# bscript script-file [options] [args]
#
# script-file : a JSON API script
# options : script options, see bubble.main.BubbleScriptOptions (and parent classes) for more info
# args : a JSON object representing arguments to the script.
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

BUBBLE_SERVER=$(cd $(dirname ${0})/.. && pwd)

CANDIDATE_INCLUDES="
${BUBBLE_SERVER}/src/test/resources/models/minimal/tests
${BUBBLE_SERVER}/resources/models/minimal/tests
${SCRIPT_DIR}/models/minimal/tests
"

if [[ -z "${BUBBLE_INCLUDE}" ]] ; then
for include in ${CANDIDATE_INCLUDES} ; do
if [[ -d ${include} ]] ; then
BUBBLE_INCLUDE="$(cd ${include} && pwd)"
break
fi
done
fi
if [[ ! -z "${BUBBLE_INCLUDE}" ]] ; then
BUBBLE_INCLUDE="-I ${BUBBLE_INCLUDE}"
fi

SCRIPT=${1:?no JSON script provided}
shift
ARGS=$(quote_args "$@")

exec ${SCRIPT_DIR}/bubble script -H ${BUBBLE_INCLUDE} ${ARGS} ${SCRIPT}

+ 98
- 0
bin/bubble View File

@@ -0,0 +1,98 @@
#!/bin/bash
#
# Bubble client script. Wraps the run.sh script.
#
# Usually not called directly, instead use one of the higher-level wrappers: sync-model, run-script
#
# Usage: bubble command [args]
#
# command : one of the Bubble CLI commands. Find the full list of commands in BubbleMain.java
# args : depends on the command, usually -h / --help will show command help
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_ENV : env file to load. Default is ~/.bubble.env or /home/bubble/current/bubble.env (whichever is found first)
# DEBUG_PORT : if set, this is the port number the client will wait for a debugger to attach before starting
# BUBBLE_INCLUDE : when using the sync-model and run-script commands, this is the directory to find included files
# For sync-model and migrate-model, the default is the current directory.
# For run-script, the default is a directory named "tests" within the current directory
# BUBBLE_SCRIPTS : location of run.sh script. Default is to assume it is in the same directory containing this script
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

function is_api_command {
case "${1}" in
-h|--help)
echo "0"
;;
*)
echo "1"
;;
esac
}

if [[ -z "${DEBUG_PORT}" ]] ; then
debug=""
else
debug="debug ${DEBUG_PORT}"
fi

if [[ -z "${1}" ]] ; then
COMMAND="-h"
else
COMMAND="${1}"
shift
fi

# always send help commands through
last_arg="$(echo "${@}" | awk '{print $NF}')"
is_help=0
if [[ ! -z "${last_arg}" && ( "${last_arg}" == "-h" || "${last_arg}" == "--help" ) ]] ; then
is_help=1
fi

if [[ -z "${BUBBLE_ENV}" ]] ; then
if [[ -f "${HOME}/.bubble.env" ]] ; then
BUBBLE_ENV="${HOME}/.bubble.env"
elif [[ -f "/home/bubble/current/bubble.env" ]] ; then
BUBBLE_ENV="/home/bubble/current/bubble.env"
else
die "bubble environment file not found"
fi
fi

if [[ -z "${BUBBLE_API}" ]] ; then
if [[ "$(is_api_command ${COMMAND})" == "1" && is_help -eq 0 ]] ; then
BUBBLE_API=local
fi
fi
if [[ "${BUBBLE_API}" == "local" ]] ; then
if [[ -z "${BUBBLE_PORT}" ]] ; then
BUBBLE_PORT="$(cat ${BUBBLE_ENV} | egrep -v '\s*#' | grep BUBBLE_SERVER_PORT | awk -F '=' '{print $NF}' | tr -d "'" | tr -d '"')"
if [[ -z "${BUBBLE_PORT}" ]] ; then
die "Error reading BUBBLE_SERVER_PORT from ${BUBBLE_ENV}"
fi
fi
BUBBLE_API="http://127.0.0.1:${BUBBLE_PORT}/api"
fi

if [[ -z "${BUBBLE_INCLUDE}" ]] ; then
if [[ "${COMMAND}" == "sync-model" ]] ; then
BUBBLE_INCLUDE="$(pwd)"
elif [[ "${COMMAND}" == "script" ]] ; then
BUBBLE_INCLUDE="$(pwd)/tests"
else
BUBBLE_INCLUDE="$(pwd)"
fi
fi

BUBBLE_PASS=${BUBBLE_PASS} \
BUBBLE_USER=${BUBBLE_USER} \
BUBBLE_API=${BUBBLE_API} \
BUBBLE_INCLUDE=${BUBBLE_INCLUDE} \
exec ${BUBBLE_SCRIPTS}/run.sh ${debug} ${COMMAND} "${@}"

+ 105
- 0
bin/bubble_common View File

@@ -0,0 +1,105 @@
#!/bin/bash

function die {
if [[ -z "${SCRIPT}" ]] ; then
echo 1>&2 "${1}"
else
echo 1>&2 "${SCRIPT}: ${1}"
fi
exit 1
}

function handle_help_request () {

if [[ -z "${2}" ]] ; then
return
fi

if [[ ${2} == "-h" || ${2} == "--help" ]] ; then
while IFS='' read -r line || [[ -n "$line" ]]; do
if [[ ${line} =~ ^#.* ]] ; then
if [[ ! ${line} =~ ^#!/bin/bash.* ]] ; then
echo "${line}"
fi
else
break
fi
done < "${1}"
exit 1
fi

}

function make_temp () {
prefix="${1}"
suffix="${2}"
echo "$(mktemp ${prefix}.XXXXXXXX${suffix})"
}

function make_temp_dir () {
prefix="${1}"
suffix="${2}"
echo "$(mktemp -d ${prefix}.XXXXXXXX${suffix})"
}

function quote_args () {
args=""
for i in "$@"; do
if [[ "$i" =~ \ |\' ]] ; then
i="${i//\\/\\\\}"
args="$args \"${i//\"/\\\"}\""
else
args="$args ${i}"
fi
done
echo -n ${args}
}
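
# Example: `quote_args --name "Jon Smith"` emits: --name "Jon Smith"
# (args containing spaces or quotes are re-quoted so they survive a later eval)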

handle_help_request ${0} ${1}

# Ensure we can find run.sh
if [[ -z "${BUBBLE_SCRIPTS}" ]] ; then
RUN_SH="$(find $(cd $(dirname ${0}) && pwd) -type f -name "run.sh" | head -1)"
if [[ -z "${RUN_SH}" ]] ; then
RUN_SH="$(find . -type f -name "run.sh" | head -1)"
fi
if [[ -z "${RUN_SH}" ]] ; then
die "run.sh script not found. Set BUBBLE_SCRIPTS to be the directory containing run.sh"
fi
BUBBLE_SCRIPTS="$(dirname "${RUN_SH}")"
elif [[ ! -f "${BUBBLE_SCRIPTS}/run.sh" ]] ; then
die "run.sh script not found in BUBBLE_SCRIPTS dir (${BUBBLE_SCRIPTS})"
fi

if [[ -z "${BUBBLE_PASS}" ]] ; then
if [[ ! -z "${REQUIRE_BUBBLE_PASS}" ]] ; then
die "No BUBBLE_PASS env var defined"
fi
BUBBLE_PASS=password
fi
if [[ -z "${BUBBLE_USER}" ]] ; then
if [[ ! -z "${REQUIRE_BUBBLE_USER}" ]] ; then
die "No BUBBLE_USER env var defined"
fi
BUBBLE_USER=root
fi

if [[ -z "${BUBBLE_JAR}" ]] ; then
if [[ -f "${HOME}/current/bubble.jar" ]] ; then
BUBBLE_JAR="${HOME}/current/bubble.jar"
elif [[ -f "/home/bubble/current/bubble.jar" ]] ; then
BUBBLE_JAR="/home/bubble/current/bubble.jar"
else
BUBBLE_JAR="$(find ${BUBBLE_SCRIPTS}/../bubble-server/target -type f -name bubble*.jar | head -1)"
fi
fi

# Check to see if we are on the PATH, if not suggest that we could be
BUBBLE_BIN="$(cd $(dirname ${0}) && pwd)"
if [[ -z "${BUBBLE_SKIP_PATH_WARNING}" && -z "$(which $(basename ${0}))" ]] ; then
echo 1>&2 "Note: ${BUBBLE_BIN} is not on your PATH. To make things easier, add it to your PATH:"
echo 1>&2 ""
echo 1>&2 "export PATH=\${PATH}:${BUBBLE_BIN}"
echo 1>&2 ""
export BUBBLE_SKIP_PATH_WARNING=1
fi

+ 30
- 0
bin/bunlock View File

@@ -0,0 +1,30 @@
#!/bin/bash
#
# Unlock a new bubble
#
# Usage:
#
# bunlock unlock-key
#
# unlock-key : the unlock key for the new bubble
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)

REQUIRE_BUBBLE_USER=1
REQUIRE_BUBBLE_PASS=1
. ${SCRIPT_DIR}/bubble_common

if [[ -z "${BUBBLE_API}" ]] ; then
die "No BUBBLE_API env var defined"
fi
UNLOCK_KEY=${1:?no unlock-key provided}

echo "{\"name\":\"${BUBBLE_USER}\",\"password\":\"${BUBBLE_PASS}\"}" | \
${SCRIPT_DIR}/bpost 'auth/login?k='"${UNLOCK_KEY}"'' - --no-login

+ 53
- 0
bin/cleanup_bubble_databases View File

@@ -0,0 +1,53 @@
#!/bin/bash
#
# Drop the various temporary databases that sometimes get left around by the tests
# Do not run this command while tests are running
#
# Usage:
#
# cleanup_bubble_databases [db-match] [fg]
#
# db-match : all databases that contain this string will be dropped. default is "bubble_"
# fg       : normally a forked shell is used for each "dropdb" command; if the last argument is "fg" then we'll use the current shell
#
# Note: if you specify db-match "bubble", this would normally include the default bubble database, but it will be
# specifically excluded by this script, to avoid dropping a real database with real data in it.
#
# To drop the "bubble" database, you must drop it directly with dropdb.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)

DB_MATCH="${1:-bubble_}"
DB_USER="$(whoami)"
FORK_DROP=1

if [[ "${2:-fork}" = "fg" ]] ; then
FORK_DROP=0
fi
if [[ ${FORK_DROP} -eq 1 ]] ; then
set -m
fi

DATABASE_COUNT="$(echo "select datname from pg_database" | psql -qt -U ${DB_USER} template1 | grep ${DB_MATCH} | wc -l | tr -d ' ')"

if [[ ${DATABASE_COUNT} -gt 0 ]] ; then
echo "Cleaning up ${DATABASE_COUNT} databases..."

DATABASES="$(echo "select datname from pg_database" | psql -qt -U ${DB_USER} template1 | grep ${DB_MATCH} | tr -d ' ')"
if [[ ! -z "${DATABASES}" ]] ; then
for db in ${DATABASES} ; do
if [[ "${db}" == "bubble" ]] ; then
echo "Not dropping bubble default database"
else
if [[ "${FORK_DROP}" -eq 1 ]] ; then
dropdb -U ${DB_USER} ${db} &
else
dropdb -U ${DB_USER} ${db} || echo "Error dropping database: ${db}"
fi
fi
done
fi
else
echo "No databases whose name contains \"${DB_MATCH}\" to clean up"
fi

+ 55
- 0
bin/create_user_and_network View File

@@ -0,0 +1,55 @@
#!/bin/bash
#
# Create a new user and start a network for that user
#
# Usage: create_user_and_network <json-file>
#
# json-file : a JSON file containing specs for the new user and network
#
# The default values are:
#
# {
# "rootUsername": "root",
# "rootPassword": "password",
# "sageFqdn": "_required",
# "nginxPort": "1443",
# "username": "user-<<rand 5>>",
# "password": "<<rand 10>>",
# "network": "test-net-{{rand 5}}",
# "email": "user-<<rand 5>>@example.com",
# "domain": "bubblev.org",
# "locale": "en_US",
# "lang": "en",
# "timezone": "EST",
# "plan": "bubble pro",
# "footprint": "Worldwide, excluding OFAC",
# "compute": "VultrCompute",
# "region": "New Jersey"
# }
#
# The required fields above have a value of "_required". A minimal example would be:
#
# {
# "sageFqdn": "api-XXXXX.staging.bubblev.org"
# }
#
# This would add a user with a random name user-{{rand 5}} with a random password and random @example.com email address,
# and a randomly named network with the name test-net-{{rand 5}}
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_SCRIPTS : location of run.sh script. Default is to assume it is in the same directory containing this script
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

JSON="${1:?no json provided}"
if [[ ! -f "${JSON}" ]] ; then
die "json file not found: ${JSON}"
fi

cat "${JSON}" | exec ${SCRIPT_DIR}/bscript debug ${SCRIPT_DIR}/../scripts/create_user_and_network.json --no-login --vars -

+ 49
- 0
bin/first_time_setup.sh View File

@@ -0,0 +1,49 @@
#!/bin/bash
#
# Performs first-time setup after a fresh git clone.
# Installs utility libraries.
#
# Before running this script, if you want to run the Bubble Server, install dependencies
# with the first_time_ubuntu.sh script. If you're running something other than Ubuntu 18.04,
# please add a first_time_<your-OS>.sh in this directory.
#
# If you're going to run the Bubble Server, you will also need to create
# environment files: ~/.bubble.env and ~/.bubble-test.env (one can be a symlink to the other)
#
# ~/.bubble.env is the environment used by the BubbleServer started by run.sh
# ~/.bubble-test.env is the environment used by the BubbleServer that runs during the integration tests
#

function die {
if [[ -z "${SCRIPT}" ]] ; then
echo 1>&2 "${1}"
else
echo 1>&2 "${SCRIPT}: ${1}"
fi
exit 1
}

BASE=$(cd $(dirname $0)/.. && pwd)
cd ${BASE}

git submodule init || die "Error in git submodule init"
git submodule update || die "Error in git submodule update"

pushd utils/cobbzilla-parent
mvn install || die "Error installing cobbzilla-parent"
popd

UTIL_REPOS="
cobbzilla-parent
cobbzilla-utils
restex
templated-mail-sender
cobbzilla-wizard
"
pushd utils
for repo in ${UTIL_REPOS} ; do
pushd ${repo} && git checkout master && mvn -DskipTests=true -Dcheckstyle.skip=true clean install && popd || die "Error installing ${repo}"
done
popd

INSTALL_WEB=web mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar"

+ 29
- 0
bin/first_time_ubuntu.sh View File

@@ -0,0 +1,29 @@
#!/bin/bash

function die {
echo 1>&2 "${1}"
exit 1
}

# Ensure system is current
sudo apt update -y || die "Error running apt update"
sudo apt upgrade -y || die "Error running apt upgrade"

# Install packages
sudo apt install openjdk-11-jdk maven postgresql-10 redis-server jq python3 python3-pip npm webpack -y || die "Error installing apt packages"
sudo pip3 install setuptools psycopg2-binary || die "Error installing pip packages"

# Create DB user for current user, as superuser
CURRENT_USER="$(whoami)"
sudo su - postgres bash -c 'createuser -U postgres --createdb --createrole --superuser '"${CURRENT_USER}"'' || die "Error creating ${CURRENT_USER} DB user"

PG_HBA=/etc/postgresql/10/main/pg_hba.conf
sudo cat ${PG_HBA} | sed -e 's/ peer/ trust/g' | sed -e 's/ md5/ trust/g' > /tmp/pg_hba.conf || die "Error filtering ${PG_HBA}"
sudo bash -c "cat /tmp/pg_hba.conf > ${PG_HBA}" || die "Error rewriting ${PG_HBA}"
sudo service postgresql restart || die "Error restarting pgsql"

# Create DB user 'bubble', with the ability to create databases
createuser --createdb bubble || die "Error creating bubble DB user"

# Create bubble database
createdb --encoding=UTF-8 bubble || die "Error creating bubble DB"

+ 59
- 0
bin/git_update_bubble.sh View File

@@ -0,0 +1,59 @@
#!/bin/bash
#
# Update repository from master, including submodules, and rebuild bubble jar file
#
# Usage:
#
# git_update_bubble.sh [fast]
#
# fast : if the first argument is 'fast', then don't perform "clean" builds for submodules, just repackage/reinstall them
#
function die {
if [[ -z "${SCRIPT}" ]] ; then
echo 1>&2 "${1}"
else
echo 1>&2 "${SCRIPT}: ${1}"
fi
exit 1
}

FAST=${1}
if [[ ! -z "${FAST}" && "${FAST}" == "fast" ]] ; then
FAST=1
else
FAST=0
fi

BASE=$(cd $(dirname $0)/.. && pwd)
cd ${BASE}

git fetch || die "Error calling git fetch"
git pull origin master || die "Error calling git pull origin master"
git submodule update || die "Error in git submodule update"

pushd utils/cobbzilla-parent
git fetch && git checkout master && git pull origin master && mvn install || die "Error updating/installing cobbzilla-parent"
popd

UTIL_REPOS="
cobbzilla-parent
cobbzilla-utils
restex
templated-mail-sender
cobbzilla-wizard
"
pushd utils
for repo in ${UTIL_REPOS} ; do
if [[ ${FAST} -eq 1 ]] ; then
pushd ${repo} && git fetch && git checkout master && git pull origin master && mvn -DskipTests=true -Dcheckstyle.skip=true install && popd || die "Error installing ${repo}"
else
pushd ${repo} && git fetch && git checkout master && git pull origin master && mvn -DskipTests=true -Dcheckstyle.skip=true clean install && popd || die "Error installing ${repo}"
fi
done
popd

if [[ ${FAST} -eq 1 ]] ; then
mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar"
else
INSTALL_WEB=web mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar"
fi

+ 18
- 0
bin/list_bubble_databases View File

@@ -0,0 +1,18 @@
#!/bin/bash
#
# List bubble databases
#
# Usage:
#
# list_bubble_databases [db-match]
#
# db-match only list databases whose names include this value. default is bubble
#
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)

DB_USER=$(whoami)
DB_BASENAME="${1:-bubble}"

echo "select datname from pg_database" | psql -qt -U ${DB_USER} | tr -d ' ' | grep ${DB_BASENAME}

+ 39
- 0
bin/new_bubble.sh View File

@@ -0,0 +1,39 @@
#!/bin/bash
#
# Launch a new bubble from a sage node
#
# Usage: new_bubble.sh config-file
#
# config-file : a JSON file with parameters to indicate how the bubble should be created
# see models/include/new_bubble.json for all parameters
#
# Minimally required JSON properties:
# sageFqdn : fully-qualified domain name (FQDN) of a bubble sage. This must be a valid sage node.
# network : network name of the bubble to create. This network must *not* already exist.
#
# For example:
# {
# "sageFqdn": "bubble-sage.example.com",
# "network": "mynetwork"
# }
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_ENV : env file to load. Default is ~/.bubble.env or /home/bubble/current/bubble.env (whichever is found first)
# DEBUG_PORT : if set, this is the port number the client will wait for a debugger to attach before starting
# BUBBLE_INCLUDE : when using the sync-model and run-script commands, this is the directory to find included files
# For sync-model and migrate-model, the default is the current directory.
# For run-script, the default is a directory named "tests" within the current directory
# BUBBLE_SCRIPTS : location of run.sh script. Default is to assume it is in the same directory containing this script
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

CONFIG_JSON="${1:?no config json provided}"
shift

cat ${CONFIG_JSON} | exec ${SCRIPT_DIR}/bscript ${SCRIPT_DIR}/../scripts/new_bubble.json ${@} --call-include --vars -

+ 104
- 0
bin/prep_bubble_jar View File

@@ -0,0 +1,104 @@
#!/bin/bash
#
# Prepares the bubble.jar file for active usage.
#
# 1. Update role JSON in bubble-server/target/classes/ansible/default_roles.json
#    Inserts a "tgzB64" value that references each role tarball via a storage://LocalStorage/ URL
#
# 2. Copy scripts to bubble-server/target/classes/scripts
#
# 3. If the environment variable INSTALL_WEB is equal to "web", also build and install the bubble-web
# site to bubble-server/target/classes/site
#
# Usage:
#
# prep_bubble_jar
#
#
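# Example (INSTALL_WEB is optional; setting it to "web" also builds and installs bubble-web):
#   prep_bubble_jar
#   INSTALL_WEB=web prep_bubble_jar
#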
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

BUBBLE_SERVER="$(cd "${SCRIPT_DIR}/../bubble-server" && pwd)"
CLASSES_DIR="${BUBBLE_SERVER}/target/classes"
DEFAULT_ROLES_RELATIVE="ansible/default_roles.json"
DEFAULT_ROLES="${CLASSES_DIR}/${DEFAULT_ROLES_RELATIVE}"
if [[ ! -f ${DEFAULT_ROLES} ]] ; then
die "default roles file not found: ${DEFAULT_ROLES}"
fi
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
for f in "${HOME}/bubble/current/bubble.env" "${HOME}/.bubble.env" ; do
if [[ -f "${f}" ]] ; then
LOCALSTORAGE_BASE_DIR=$(cat ${f} | grep -v '^#' | grep LOCALSTORAGE_BASE_DIR | awk -F '=' '{print $2}' | tr -d ' ')
break
fi
done
fi
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
echo "Warning: LOCALSTORAGE_BASE_DIR env var not defined and no bubble.env found, using ${HOME}/.bubble_local_storage"
LOCALSTORAGE_BASE_DIR="${HOME}/.bubble_local_storage"
fi

if [[ -z "${BUBBLE_JAR}" ]] ; then
die "bubble jar not found: ${BUBBLE_JAR}"
fi

ROLES_DIR="$(cd "${SCRIPT_DIR}/../automation/roles" && pwd)"
if [[ ! -d ${ROLES_DIR} ]] ; then
die "automation/roles dir not found: ${ROLES_DIR}"
fi

echo "lbs = ${LOCALSTORAGE_BASE_DIR}"

UPDATED="$(mktemp /tmp/default_roles.XXXXXXX.json)"
cd ${ROLES_DIR}
echo "[" > "${UPDATED}"
for role in $(ls -1) ; do
echo "Processing role: ${role}"
ROLE_JSON="${role}/files/bubble_role.json"
if [[ ! -f "${ROLE_JSON}" ]] ; then
die "Json file not found for role ${role}: ${ROLE_JSON}"
fi
# after the first entry, add a comma separator (the file starts as "[" plus a newline, 2 bytes)
if [[ $(cat ${UPDATED} | wc -c) -gt 2 ]] ; then
echo "," >> ${UPDATED}
fi

role_name="$(cat "${ROLE_JSON}" | jq -r .name)"
role_path="automation/roles/${role_name}.tgz"
TGZ_PATH="${LOCALSTORAGE_BASE_DIR}/${role_path}"
mkdir -p $(dirname ${TGZ_PATH}) || die "Error creating parent dir for ${TGZ_PATH}"
tar czf ${TGZ_PATH} ${role} || die "Error creating tarball: ${TGZ_PATH}"

cat ${ROLE_JSON} | jq --arg tgzB64 "storage://LocalStorage/${role_path}" '. + {tgzB64: $tgzB64}' >> ${UPDATED}

echo "------------------------------"
echo "Generated role JSON: ${role}"
echo "------------------------------"
done

echo "]" >> ${UPDATED}

jq . < ${UPDATED} > ${DEFAULT_ROLES} || die "Error writing ${DEFAULT_ROLES}, maybe some problems with ${UPDATED} ?"

echo "------------------------------------------------------------"
cat "${UPDATED}"
echo "------------------------------------------------------------"

mkdir -p ${CLASSES_DIR}/scripts
for script in run.sh bubble_common bubble bget bpost bposte bput bpute bdelete bscript bmodel bencrypt bdecrypt list_bubble_databases cleanup_bubble_databases ; do
cp ${SCRIPT_DIR}/${script} ${CLASSES_DIR}/scripts || die "Error copying ${SCRIPT_DIR}/${script} -> ${CLASSES_DIR}/scripts"
done

cd ${CLASSES_DIR} && jar uvf ${BUBBLE_JAR} scripts ${DEFAULT_ROLES_RELATIVE} || die "Error updating ${BUBBLE_JAR} with scripts"
echo "Updated $(ls -1 ${ROLES_DIR} | wc -l) roles in ${DEFAULT_ROLES}"

rm -f "${UPDATED}"

if [[ ! -z "${INSTALL_WEB}" && "${INSTALL_WEB}" == "web" ]] ; then
mkdir -p ${CLASSES_DIR}/site
BUBBLE_WEB="$(cd "${SCRIPT_DIR}/../bubble-web" && pwd)"
cd ${BUBBLE_WEB} && npm install && webpack || die "Error building bubble-web"
cp -R ${BUBBLE_WEB}/dist/* ${CLASSES_DIR}/site/ || die "Error copying ${BUBBLE_WEB}/dist/* -> ${CLASSES_DIR}/site/"
cd ${CLASSES_DIR} && jar uvf ${BUBBLE_JAR} site || die "Error updating ${BUBBLE_JAR} with site"
echo "Installed bubble-web to ${CLASSES_DIR}/site/"
fi

+ 36
- 0
bin/proxy View File

@@ -0,0 +1,36 @@
#!/bin/bash
#
# Request a URL via the reverse proxy
#
# Usage: proxy url outfile [raw]
#
# raw : optional third argument, passed through to the proxy API script. Default is true
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_SCRIPTS : location of run.sh script. Default is to assume it is in the same directory containing this script
#
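# Example (hypothetical URL and output file):
#   proxy https://example.com/ /tmp/example.html
#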
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

URL="${1:?no URL provided}"
OUTFILE="${2:?no outfile provided}"
RAW=${3:-true}

if [[ "${URL}" =~ http:// ]] ; then
URL="http/$(echo -n ${URL} | cut -d/ -f3-)"
elif [[ "${URL}" =~ https:// ]] ; then
URL="https/$(echo -n ${URL} | cut -d/ -f3-)"
else
URL="http/$(echo -n ${URL} | cut -d/ -f3-)"
fi

PROXY_SCRIPT="${SCRIPT_DIR}/../scripts/proxy.json"
if [[ ! -f "${PROXY_SCRIPT}" ]] ; then
die "proxy API script not found: ${PROXY_SCRIPT}"
fi

echo '{"URL": "'"${URL}"'", "OUTFILE": "'"${OUTFILE}"'", "RAW": "'"${RAW}"'"}' \
| ${SCRIPT_DIR}/bscript ${PROXY_SCRIPT} --vars -

+ 47
- 0
bin/reset_bubble_db View File

@@ -0,0 +1,47 @@
#!/bin/bash
#
# Reset the local "bubble" database
#
# Usage: reset_bubble_db [debug]
#
# debug : set this to 'debug' to enable debugging
#
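# Example:
#   reset_bubble_db         # rebuild bubble.sql via DbInit, then recreate the "bubble" DB
#   reset_bubble_db debug   # same, but DbInit waits for a debugger on port 5005
#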
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

DEBUG=${1}
if [[ ! -z "${DEBUG}" && "${DEBUG}" == "debug" ]] ; then
DEBUG=1
else
DEBUG=0
fi

BUBBLE_SERVER="$(cd ${SCRIPT_DIR}/../bubble-server && pwd)"
if [[ ! -d "${BUBBLE_SERVER}" ]] ; then
die "bubble-server dir not found: ${BUBBLE_SERVER}"
fi

BUBBLE_TARGET=${BUBBLE_SERVER}/target
META_DIR="${BUBBLE_TARGET}/classes/META-INF/bubble/"
mkdir -p ${META_DIR} || die "Error creating META-INF dir: ${META_DIR}"

SQL_DIR="${BUBBLE_SERVER}/target/classes/META-INF/"
if [[ ! -d "${SQL_DIR}" ]] ; then
die "config dir not found: ${SQL_DIR}"
fi
SQL_DIR="$(cd ${SQL_DIR} && pwd)"

if [[ ${DEBUG} -eq 1 ]] ; then
cd ${SCRIPT_DIR}/.. && \
mvn -Dmaven.surefire.debug="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=5005 -Xnoagent -Djava.compiler=NONE" \
-Dtest=bubble.test.DbInit -Ddb.dump=${SQL_DIR}/bubble.sql test \
|| exit 1
else
cd ${SCRIPT_DIR}/.. && \
mvn -Dtest=bubble.test.DbInit -Ddb.dump=${SQL_DIR}/bubble.sql test || exit 1
fi

dropdb bubble ; createdb bubble && cat ${SQL_DIR}/bubble.sql | psql bubble || die "Error initializing bubble database"
echo "Successfully initialized DB schema from:"
echo ${SQL_DIR}/bubble.sql

+ 115
- 0
bin/run.sh View File

@@ -0,0 +1,115 @@
#!/bin/bash
#
# Run Bubble server or CLI commands. A wrapper for starting a JVM to run Bubble programs.
#
# Usage: run.sh [debug [debug-port]] [command] [args]
#
# All arguments are optional:
#
# debug : if the first argument is the literal string 'debug' then immediately after starting,
# the Java process will wait for a debugger to attach. Default is not to enable debugging.
# debug-port : the port that will be listened on for the debugger. Default port is 5005
# command : the CLI command to run, or 'server' to run the Bubble API server. Default is to run the Bubble API server
# args : depends on the command. Use '-h' to request help for a command
#
# Environment variables
#
# BUBBLE_ENV : env file to load, used when performing handlebars substitutions on entities marked
# with `"_subst": true` JSON attribute. Default is ~/.bubble.env
# BUBBLE_JVM_OPTS : Java options. Default is "-Xmx512m -Xms512m" for the server, "-Xmx256m -Xms64m" for CLI commands
# BUBBLE_JAR : location of bubble uberjar. Default is to assume there is exactly one bubble-server*.jar file in a
# directory named "target" that is in the same directory as this script
#
# Environment variables for API commands
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
#
#
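# Examples (illustrative):
#   run.sh                # start the Bubble API server
#   run.sh debug 6005     # start the server, first waiting for a debugger on port 6005
#   run.sh <command> -h   # show help for a CLI command
#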
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

# fail on any command error
set -e

BASE=$(cd $(dirname $0) && pwd)
if [[ $(basename ${BASE}) != "bubble-server" && -d "${BASE}/bubble-server" ]] ; then
BASE="${BASE}/bubble-server"
fi
if [[ $(basename ${BASE}) == "bin" && -d "${BASE}/../bubble-server" ]] ; then
BASE="$(cd ${BASE}/../bubble-server && pwd)"
fi

# save explicitly set key, if we have one
SAVED_DB_KEY=""
if [[ ! -z "${BUBBLE_DB_ENCRYPTION_KEY}" ]] ; then
SAVED_DB_KEY="${BUBBLE_DB_ENCRYPTION_KEY}"
fi

if [[ -z "${BUBBLE_ENV}" ]] ; then
BUBBLE_ENV="${HOME}/.bubble.env"
if [[ ! -f "${BUBBLE_ENV}" ]] ; then
BUBBLE_ENV="/home/bubble/current/bubble.env"
fi
fi
if [[ -f ${BUBBLE_ENV} ]] ; then
echo 1>&2 "Loading env: ${BUBBLE_ENV}"
. ${BUBBLE_ENV}
fi

if [[ ! -z "${SAVED_DB_KEY}" ]] ; then
export BUBBLE_DB_ENCRYPTION_KEY="${SAVED_DB_KEY}"
fi

debug="${1}"
if [[ "x${debug}" == "xdebug" ]] ; then
shift
ARG_LEN=$(echo -n "${1}" | wc -c)
ARG_NUMERIC_LEN=$(echo -n "${1}" | tr -dc '[:digit:]' | wc -c) # count digits only
if [[ ! -z "${ARG_NUMERIC_LEN}" && ${ARG_LEN} -eq ${ARG_NUMERIC_LEN} ]] ; then
# Second arg is the debug port
DEBUG_PORT="${1}"
shift || :
fi
if [[ -z "${DEBUG_PORT}" ]] ; then
DEBUG_PORT=5005
fi
debug="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=${DEBUG_PORT}"
else
debug=""
fi

command="${1}"
server=0
DEFAULT_JVM_OPTS=""
if [[ -z "${command}" ]] ; then
server=1
CLASS=bubble.server.BubbleServer
DEFAULT_JVM_OPTS="-Xmx512m -Xms512m"

else
CLASS=bubble.main.BubbleMain
DEFAULT_JVM_OPTS="-Xmx256m -Xms64m"
shift
fi

if [[ -z "${BUBBLE_JAR}" ]] ; then
die "API jar file not found in ${BASE}/target"
fi

if [[ -z "${BUBBLE_JVM_OPTS}" ]] ; then
BUBBLE_JVM_OPTS="${DEFAULT_JVM_OPTS}"
fi
BUBBLE_JVM_OPTS="${BUBBLE_JVM_OPTS} -Djava.net.preferIPv4Stack=true"

# Choose appropriate log config
if [[ ${server} -eq 1 ]] ; then
LOG_CONFIG="-Dlogback.configurationFile=logback.xml"
else
LOG_CONFIG="-Dlogback.configurationFile=logback-client.xml"
fi

# Run!
BUBBLE_JAR="${BUBBLE_JAR}" java ${LOG_CONFIG} ${BUBBLE_JVM_OPTS} ${debug} -server -cp ${BUBBLE_JAR} ${CLASS} ${command} "${@}"

+ 63
- 0
bin/update_role View File

@@ -0,0 +1,63 @@
#!/bin/bash
#
# Update a role on a remote node
#
# Usage:
#
# update_role domain role-dir
#
# domain : a bubble domain
# role-dir : the role directory
#
# Environment variables
#
# BUBBLE_API : which API to use. Default is local (http://127.0.0.1:PORT, where PORT is found in .bubble.env)
# BUBBLE_USER : account to use. Default is root
# BUBBLE_PASS : password for account. Default is root
# BUBBLE_INCLUDE : path to look for JSON include files. default value is to assume we are being run from
# bubble repo, bubble-models repo, or bubble-client and use include files from minimal model.
#
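# Example (hypothetical domain; role dir layout matches automation/roles/*):
#   update_role example.com automation/roles/algo
#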

SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
. ${SCRIPT_DIR}/bubble_common

DOMAIN=${1:?no domain provided}
ROLE_DIR="${2:?no role dir provided}"
ROLE_TO_UPDATE="$(basename "${ROLE_DIR}")"
ROLE_JSON="${ROLE_DIR}/files/bubble_role.json"

if [[ ! -d "${ROLE_DIR}" ]] ; then
die "Role dir does not exist or is not a directory: ${ROLE_DIR}"
fi
if [[ ! -f "${ROLE_DIR}/tasks/main.yml" ]] ; then
die "Role dir is invalid (missing tasks/main.yml): ${ROLE_DIR}"
fi
if [[ ! -f "${ROLE_JSON}" ]] ; then
die "Role json does not exist: ${ROLE_JSON}"
fi

role_json_name="$(cat ${ROLE_JSON} | jq -r .name)"
if [[ -z "${role_json_name}" ]] ; then
die "Role json does not have a name field"
fi

role_name="$(echo -n ${role_json_name} | cut -d- -f1)"
if [[ "${role_name}" != "${ROLE_TO_UPDATE}" ]] ; then
die "Role name in JSON (${role_name}) does not match dir basename: ${ROLE_DIR}"
fi

# update tgzB64 in JSON
temp_tar_b64="$(mktemp /tmp/${role_name}.XXXXXXX.tgz.b64)"
echo -n '"' > "${temp_tar_b64}"
cd ${ROLE_DIR}/.. && tar cz $(basename ${ROLE_DIR}) | base64 -w0 >> ${temp_tar_b64} && cd - > /dev/null
echo -n '"' >> "${temp_tar_b64}"

ROLE_JSON="$(cat ${ROLE_JSON} | jq --argfile tgzB64 "${temp_tar_b64}" '. + {tgzB64: $tgzB64}')"

rm -f "${temp_tar_b64}"

echo "{\"domain\": \"${DOMAIN}\", \"update_role\": \"${ROLE_TO_UPDATE}\", \"role\": ${ROLE_JSON} }" | \
exec ${SCRIPT_DIR}/bscript ${SCRIPT_DIR}/../scripts/update_role.json --vars -

+ 283
- 0
bubble-server/pom.xml View File

@@ -0,0 +1,283 @@
<?xml version="1.0" encoding="UTF-8"?>

<!--
(c) Copyright 2019 jonathan cobb
For personal or non-commercial use, this code is available under the GNU Affero General Public License, version 3:
https://www.gnu.org/licenses/agpl-3.0.html
For commercial use, please contact jonathan@kyuss.org
-->
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<parent>
<groupId>bubble</groupId>
<artifactId>bubble</artifactId>
<version>1.0.0-SNAPSHOT</version>
</parent>

<artifactId>bubble-server</artifactId>
<version>1.0.0-SNAPSHOT</version>

<repositories>
<repository>
<id>jitpack.io</id>
<url>https://jitpack.io</url>
</repository>
</repositories>

<dependencies>
<dependency>
<groupId>org.cobbzilla</groupId>
<artifactId>wizard-server</artifactId>
<version>1.0.0-SNAPSHOT</version>
<exclusions>
<!-- need to bump version to latest, then refactor AbstractElasticSearchDAO -->
<exclusion>
<groupId>org.elasticsearch</groupId>
<artifactId>elasticsearch</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.cobbzilla</groupId>
<artifactId>wizard-client</artifactId>
<version>1.0.0-SNAPSHOT</version>
</dependency>

<!-- RDBMS persistence -->
<dependency>
<groupId>org.hibernate</groupId>
<artifactId>hibernate-core</artifactId>
<version>${hibernate.version}</version>
</dependency>
<dependency>
<groupId>org.javassist</groupId>
<artifactId>javassist</artifactId>
<version>${javassist.version}</version>
</dependency>
<dependency>
<groupId>cglib</groupId>
<artifactId>cglib</artifactId>
<version>${cglib.version}</version>
</dependency>
<dependency>
<groupId>${jdbcDriver.postgres.groupId}</groupId>
<artifactId>${jdbcDriver.postgres.artifactId}</artifactId>
<version>${jdbcDriver.postgres.version}</version>
</dependency>

<dependency>
<groupId>org.cobbzilla</groupId>
<artifactId>templated-mail-sender</artifactId>
<version>1.0.0-SNAPSHOT</version>
</dependency>

<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http</artifactId>
<version>${grizzly.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-server</artifactId>
<version>${grizzly.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-servlet</artifactId>
<version>${grizzly.version}</version>
</dependency>

<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-framework-monitoring</artifactId>
<version>${grizzly.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-monitoring</artifactId>
<version>${grizzly.version}</version>
</dependency>
<dependency>
<groupId>org.glassfish.grizzly</groupId>
<artifactId>grizzly-http-server-monitoring</artifactId>
<version>${grizzly.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-proxy</artifactId>
<version>${jetty.version}</version>
</dependency>

<!--&lt;!&ndash; https://mvnrepository.com/artifact/org.atmosphere/atmosphere-jersey &ndash;&gt;-->
<!--<dependency>-->
<!--<groupId>org.atmosphere</groupId>-->
<!--<artifactId>atmosphere-jersey</artifactId>-->
<!--<version>2.5.4</version>-->
<!--</dependency>-->

<!--<dependency>-->
<!--<groupId>org.atmosphere</groupId>-->
<!--<artifactId>atmosphere-runtime</artifactId>-->
<!--<version>2.5.4</version>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.glassfish.jersey.containers</groupId>-->
<!--<artifactId>jersey-container-servlet</artifactId>-->
<!--<version>2.29</version>-->
<!--</dependency>-->
<!--&lt;!&ndash; https://mvnrepository.com/artifact/org.apache.geronimo.specs/geronimo-servlet_3.0_spec &ndash;&gt;-->
<!--<dependency>-->
<!--<groupId>org.apache.geronimo.specs</groupId>-->
<!--<artifactId>geronimo-servlet_3.0_spec</artifactId>-->
<!--<version>1.0</version>-->
<!--<scope>provided</scope>-->
<!--</dependency>-->
<!--<dependency>-->
<!--<groupId>org.atmosphere.client</groupId>-->
<!--<artifactId>javascript</artifactId>-->
<!--<version>2.3.9</version>-->
<!--<type>war</type>-->
<!--</dependency>-->

<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>${commons-codec.version}</version>
</dependency>

<dependency>
<groupId>redis.clients</groupId>
<artifactId>jedis</artifactId>
<version>${jedis.version}</version>
</dependency>

<dependency>
<groupId>com.maxmind.geoip2</groupId>
<artifactId>geoip2</artifactId>
<version>2.12.0</version>
</dependency>

<dependency>
<groupId>com.warrenstrange</groupId>
<artifactId>googleauth</artifactId>
<version>1.4.0</version>
</dependency>

<dependency>
<groupId>com.twilio.sdk</groupId>
<artifactId>twilio</artifactId>
<version>7.44.0</version>
</dependency>

<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-s3</artifactId>
<version>1.11.659</version>
</dependency>
<dependency>
<groupId>com.stripe</groupId>
<artifactId>stripe-java</artifactId>
<version>16.2.0</version>
</dependency>

<dependency>
<groupId>org.cobbzilla</groupId>
<artifactId>restex</artifactId>
<version>1.0.1-SNAPSHOT</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.cobbzilla</groupId>
<artifactId>wizard-server-test</artifactId>
<version>1.0.0-SNAPSHOT</version>
<scope>test</scope>
</dependency>

<!--<dependency>-->
<!--<groupId>org.cobbzilla</groupId>-->
<!--<artifactId>wizard-server-test</artifactId>-->
<!--<version>1.0.0-SNAPSHOT</version>-->
<!--</dependency>-->
<!-- https://mvnrepository.com/artifact/io.swagger/swagger-jersey2-jaxrs -->

<dependency>
<groupId>io.swagger</groupId>
<artifactId>swagger-jersey2-jaxrs</artifactId>
<version>1.5.24</version>
</dependency>

</dependencies>

<build>
<resources>
<resource>
<directory>src/main/resources</directory>
</resource>
</resources>

<plugins>
<!-- Building the executable uberjar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<version>2.1</version>
<executions>
<execution>
<phase>package</phase>
<goals><goal>shade</goal></goals>
<configuration>
<transformers>
<transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
<mainClass>bubble.server.BubbleServer</mainClass>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/spring.handlers</resource>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/spring.schemas</resource>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
</transformers>
<!-- Exclude signed jars to avoid errors
see: http://stackoverflow.com/a/6743609/1251543
-->
<filters>
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>META-INF/*.SF</exclude>
<exclude>META-INF/*.DSA</exclude>
<exclude>META-INF/*.RSA</exclude>
</excludes>
</filter>
</filters>
</configuration>
</execution>
</executions>
</plugin>

<!-- update ansible/default_roles.json, copy scripts into jar -->
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
<version>1.5.0</version>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>${project.basedir}/../bin/prep_bubble_jar</executable>
</configuration>
</execution>
</executions>
</plugin>
</plugins>

</build>

</project>

+ 217
- 0
bubble-server/src/main/java/bubble/ApiConstants.java View File

@@ -0,0 +1,217 @@
package bubble;

import bubble.model.cloud.BubbleNode;
import com.fasterxml.jackson.databind.JsonNode;
import com.warrenstrange.googleauth.GoogleAuthenticator;
import lombok.Getter;
import org.apache.commons.lang3.RandomUtils;
import org.cobbzilla.util.io.FileUtil;
import org.glassfish.grizzly.http.server.Request;
import org.glassfish.jersey.server.ContainerRequest;

import javax.ws.rs.core.Context;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.function.Supplier;

import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
import static org.apache.http.HttpHeaders.ACCEPT_LANGUAGE;
import static org.apache.http.HttpHeaders.USER_AGENT;
import static org.cobbzilla.util.daemon.ZillaRuntime.die;
import static org.cobbzilla.util.io.FileUtil.abs;
import static org.cobbzilla.util.io.StreamUtil.stream2string;
import static org.cobbzilla.util.json.JsonUtil.json;
import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx;

public class ApiConstants {

@Getter(lazy=true) private static final String bubbleDefaultDomain = initDefaultDomain();
private static String initDefaultDomain() {
final File f = new File(HOME_DIR, ".BUBBLE_DEFAULT_DOMAIN");
final String domain = FileUtil.toStringOrDie(f);
return domain != null ? domain.trim() : die("initDefaultDomain: "+abs(f)+" not found");
}

public static final BubbleNode NULL_NODE = new BubbleNode() {
@Override public String getUuid() { return "NULL_UUID"; }
};

public static final String ENV_BUBBLE_JAR = "BUBBLE_JAR";

public static final GoogleAuthenticator G_AUTH = new GoogleAuthenticator();

public static final Predicate ALWAYS_TRUE = m -> true;
public static final String HOME_DIR = System.getProperty("user.home");

public static final File CACERTS_DIR = new File(HOME_DIR, "cacerts");
public static final String MITMPROXY_CA_CERT_BASE = "bubble-ca-cert.";
public static final File MITMPROXY_CERT_DIR = new File(HOME_DIR, "mitm_certs");

public static final String META_PROP_BUBBLE_VERSION = "bubble.version";

public static final String SESSION_HEADER = "X-Bubble-Session";
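// Illustrative usage (values assumed): authenticated API calls send the session
// token in this header, e.g. "X-Bubble-Session: <token from the login response>"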

public static final String ENTITY_CONFIGS_ENDPOINT = "/ec";

public static final String AUTH_ENDPOINT = "/auth";
public static final String EP_ACTIVATE = "/activate";
public static final String EP_CONFIGS = "/configs";
public static final String EP_REGISTER = "/register";
public static final String EP_LOGIN = "/login";
public static final String EP_LOGOUT = "/logout";
public static final String EP_FORGOT_PASSWORD = "/forgotPassword";
public static final String EP_CHANGE_PASSWORD = "/changePassword";
public static final String EP_CA_CERT = "/cacert";
public static final String EP_SCRIPT = "/script";
public static final String EP_APPROVE = "/approve";
public static final String EP_DENY = "/deny";
public static final String EP_AUTHENTICATOR = "/authenticator";

public static final String ACCOUNTS_ENDPOINT = "/users";
public static final String EP_POLICY = "/policy";
public static final String EP_CONTACTS = "/contacts";
public static final String EP_REQUEST = "/request";
public static final String EP_DOWNLOAD = "/download";

public static final String SESSIONS_ENDPOINT = "/sessions";
public static final String MESSAGES_ENDPOINT = "/messages";
public static final String TIMEZONES_ENDPOINT = "/timezones";

public static final String APPS_ENDPOINT = "/apps";
public static final String DRIVERS_ENDPOINT = "/drivers";
public static final String CLOUDS_ENDPOINT = "/clouds";
public static final String ROLES_ENDPOINT = "/roles";
public static final String PROXY_ENDPOINT = "/p";
public static final String DATA_ENDPOINT = "/d";

public static final String NOTIFY_ENDPOINT = "/notify";
public static final String EP_READ_METADATA = "/meta";
public static final String EP_READ = "/read";
public static final String EP_LIST = "/list";
public static final String EP_LIST_NEXT = "/listNext";
public static final String EP_WRITE = "/write";
public static final String EP_DELETE = "/meta";
public static final String EP_REKEY = "/rekey";

public static final String DOMAINS_ENDPOINT = "/domains";
public static final String PLANS_ENDPOINT = "/plans";
public static final String FOOTPRINTS_ENDPOINT = "/footprints";
public static final String BACKUPS_ENDPOINT = "/backups";
public static final String EP_CLEAN_BACKUPS = "/clean";
public static final String PAYMENT_METHODS_ENDPOINT = "/paymentMethods";

public static final String ME_ENDPOINT = "/me";
public static final String EP_APPS = APPS_ENDPOINT;
public static final String EP_RULES = "/rules";
public static final String EP_MATCHERS = "/matchers";
public static final String EP_DATA = "/data";
public static final String EP_SITES = "/sites";
public static final String EP_DRIVERS = DRIVERS_ENDPOINT;
public static final String EP_CLOUDS = CLOUDS_ENDPOINT;
public static final String EP_DOMAINS = DOMAINS_ENDPOINT;
public static final String EP_NETWORKS = "/networks";
public static final String EP_PLANS = PLANS_ENDPOINT;
public static final String EP_TAGS = "/tags";
public static final String EP_NODES = "/nodes";
public static final String EP_DEVICES = "/devices";
public static final String EP_VPN = "/vpn";
public static final String EP_PAYMENT_METHOD = "/paymentMethod";
public static final String EP_PAYMENT_METHODS = PAYMENT_METHODS_ENDPOINT;
public static final String EP_PAYMENTS = "/payments";
public static final String EP_BILLS = "/bills";
public static final String EP_CLOSEST = "/closest";
public static final String EP_ROLES = ROLES_ENDPOINT;
public static final String EP_SENT_NOTIFICATIONS = "/notifications/outbox";
public static final String EP_RECEIVED_NOTIFICATIONS = "/notifications/inbox";
public static final String EP_STORAGE = "/storage";
public static final String EP_DNS = "/dns";
public static final String EP_BACKUPS = "/backups";
public static final String EP_FIND_DNS = "/find";
public static final String EP_DIG_DNS = "/dig";
public static final String EP_UPDATE_DNS = "/update";
public static final String EP_DELETE_DNS = "/remove";
public static final String EP_FOOTPRINTS = FOOTPRINTS_ENDPOINT;
public static final String EP_ACTIONS = "/actions";
public static final String EP_START = "/start";
public static final String EP_STOP = "/stop";
public static final String EP_RESTORE = "/restore";
public static final String EP_KEYS = "/keys";
public static final String EP_FORK = "/fork";

public static final String DEBUG_ENDPOINT = "/debug";
public static final String BUBBLE_MAGIC_ENDPOINT = "/.bubble";
public static final String EP_ASSETS = "/assets";

public static final String FILTER_HTTP_ENDPOINT = "/filter";
public static final String EP_APPLY = "/apply";

public static final int MAX_NOTIFY_LOG = 10000;
public static final int ERROR_MAXLEN = 2000;

public static String getToken(String json) {
if (json == null) return null;
final JsonNode val = json(json, JsonNode.class).get("token");
return val == null ? null : val.textValue();
}

@Getter(lazy=true) private static final String[] hostPrefixes = stream2string("bubble/host-prefixes.txt").split("\n");

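// produces names like "<prefix>-07ab-3x" (example value illustrative): a random host
// prefix, a 2-digit number plus two alphanumerics, then a digit and one alphanumeric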
public static String newNodeHostname() {
final String rand0 = getHostPrefixes()[RandomUtils.nextInt(0, getHostPrefixes().length)];
final int rand1 = RandomUtils.nextInt(0, 100);
final String rand2 = randomAlphanumeric(2).toLowerCase();
final int rand3 = RandomUtils.nextInt(0, 10);
final String rand4 = randomAlphanumeric(1).toLowerCase();
return rand0+"-"+(rand1 < 10 ? "0"+rand1 : rand1)+rand2+"-"+rand3+rand4;
}

public static String getRemoteHost(Request req) {
final String remoteHost = req.getHeader("X-Forwarded-For");
return remoteHost == null ? req.getRemoteAddr() : remoteHost;
}

public static String getUserAgent(ContainerRequest ctx) { return ctx.getHeaderString(USER_AGENT); }

public static final String DETECT_LOCALE = "detect";

public static List<String> getLocales(ContainerRequest ctx, String defaultLocale) {
final List<String> locales = new ArrayList<>();
final String langHeader = ctx.getHeaderString(ACCEPT_LANGUAGE);
if (langHeader == null) {
locales.add(defaultLocale);
return locales;
}
// entries look like "en-US;q=0.9"; normalize each to "en_US"
final String[] parts = langHeader.split(",");
for (String part : parts) {
locales.add(part.replaceAll("-", "_").replaceFirst(";.*", "").trim());
}
locales.add(defaultLocale);
return locales;
}

public static String normalizeLangHeader(@Context Request req) {
String langHeader = req.getHeader(ACCEPT_LANGUAGE);
if (langHeader == null) return null;

// remove everything after the first comma, change hyphens to underscores
int comma = langHeader.indexOf(',');
if (comma != -1) langHeader = langHeader.substring(0, comma);
return langHeader.replace('-', '_');
}

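// any port whose last three digits are 443 (443, 8443, 10443, ...) counts as HTTPS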
public static boolean isHttpsPort(int sslPort) { return sslPort % 1000 == 443; }

public static <T extends Enum> T enumFromString(Class<T> e, String v) {
return Arrays.stream(e.getEnumConstants())
.filter(t->t.name().equalsIgnoreCase(v))
.findFirst()
.orElseThrow((Supplier<RuntimeException>) () ->
invalidEx("err."+e.getSimpleName()+".invalid", "Invalid "+e.getSimpleName()+": "+v, v));
}
}

+ 22
- 0
bubble-server/src/main/java/bubble/BubbleHandlebars.java View File

@@ -0,0 +1,22 @@
package bubble;

import com.github.jknack.handlebars.Handlebars;
import lombok.Getter;
import org.cobbzilla.util.handlebars.HandlebarsUtil;
import org.cobbzilla.util.handlebars.HasHandlebars;
import org.cobbzilla.util.javascript.StandardJsEngine;

public class BubbleHandlebars implements HasHandlebars {

public static final BubbleHandlebars instance = new BubbleHandlebars();

@Getter(lazy=true) private final Handlebars handlebars = initHandlebars();
private Handlebars initHandlebars() {
final Handlebars hbs = new Handlebars(new HandlebarsUtil(ApiConstants.class.getSimpleName()));
HandlebarsUtil.registerUtilityHelpers(hbs);
HandlebarsUtil.registerDateHelpers(hbs);
HandlebarsUtil.registerJavaScriptHelper(hbs, StandardJsEngine::new);
return hbs;
}

}

+ 61
- 0
bubble-server/src/main/java/bubble/auth/BubbleAuthFilter.java View File

@@ -0,0 +1,61 @@
package bubble.auth;

import bubble.dao.account.AccountDAO;
import bubble.model.account.Account;
import bubble.server.BubbleConfiguration;
import lombok.Getter;
import org.cobbzilla.util.collection.ArrayUtil;
import org.cobbzilla.util.collection.SingletonSet;
import org.cobbzilla.wizard.filters.auth.AuthFilter;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import javax.ws.rs.container.ContainerRequestContext;
import javax.ws.rs.container.PreMatching;
import javax.ws.rs.ext.Provider;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import static bubble.ApiConstants.*;
import static bubble.server.BubbleServer.isRestoreMode;

@Provider @Service @PreMatching
public class BubbleAuthFilter extends AuthFilter<Account> {

public static final Set<String> SKIP_AUTH_PREFIXES = new HashSet<>(Arrays.asList(
AUTH_ENDPOINT, ENTITY_CONFIGS_ENDPOINT, BUBBLE_MAGIC_ENDPOINT, MESSAGES_ENDPOINT, TIMEZONES_ENDPOINT,
NOTIFY_ENDPOINT, FILTER_HTTP_ENDPOINT
));
public static final Set<String> SKIP_AUTH_PATHS = new SingletonSet<>(AUTH_ENDPOINT);
public static final Set<String> SKIP_ALL_AUTH = new SingletonSet<>("/");
public static final Set<String> SKIP_AUTH_RESTORE = new HashSet<>(Arrays.asList(new String[] {
AUTH_ENDPOINT, BUBBLE_MAGIC_ENDPOINT, NOTIFY_ENDPOINT, MESSAGES_ENDPOINT
}));
public static final Set<String> SKIP_AUTH_TEST = new HashSet<>(Arrays.asList(ArrayUtil.append(SKIP_AUTH_PREFIXES.toArray(new String[0]),
DEBUG_ENDPOINT
)));

@Autowired @Getter private AccountDAO accountDAO;
@Autowired @Getter private BubbleAuthProvider authProvider;
@Autowired @Getter private BubbleConfiguration configuration;

@Override protected String getAuthTokenHeader() { return SESSION_HEADER; }

@Override protected Set<String> getSkipAuthPaths() {
if (configuration.isTestMode()) return SKIP_AUTH_TEST;
return isRestoreMode() ? SKIP_AUTH_RESTORE : SKIP_AUTH_PATHS;
}

@Override protected Set<String> getSkipAuthPrefixes() {
if (!accountDAO.activated()) return SKIP_ALL_AUTH;
if (configuration.isTestMode()) return SKIP_AUTH_TEST;
if (isRestoreMode()) return SKIP_AUTH_RESTORE;
return SKIP_AUTH_PREFIXES;
}

@Override protected boolean isPermitted(Account principal, ContainerRequestContext request) {
return principal != null;
}

}

+ 16
- 0
bubble-server/src/main/java/bubble/auth/BubbleAuthProvider.java View File

@@ -0,0 +1,16 @@
package bubble.auth;

import bubble.dao.SessionDAO;
import bubble.model.account.Account;
import org.cobbzilla.wizard.filters.auth.AuthProvider;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

@Service
public class BubbleAuthProvider implements AuthProvider<Account> {

@Autowired private SessionDAO sessionDAO;

@Override public Account find(String uuid) { return sessionDAO.find(uuid); }

}

+ 29
- 0
bubble-server/src/main/java/bubble/client/BubbleApiClient.java View File

@@ -0,0 +1,29 @@
package bubble.client;

import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.cobbzilla.util.http.ApiConnectionInfo;
import org.cobbzilla.wizard.client.ApiClientBase;

import static bubble.ApiConstants.SESSION_HEADER;

public class BubbleApiClient extends ApiClientBase {

@SuppressWarnings("unused") // called by ApiClientBase.copy
public BubbleApiClient(BubbleApiClient other) {
this(other.getConnectionInfo());
setToken(other.getToken());
}

public BubbleApiClient(ApiConnectionInfo connectionInfo) { super(connectionInfo); }

@Override public String getTokenHeader() { return SESSION_HEADER; }

public static HttpClientBuilder newHttpClientBuilder(int max, int maxPerRoute) {
final PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
cm.setMaxTotal(max);
cm.setDefaultMaxPerRoute(maxPerRoute);
return HttpClientBuilder.create().setConnectionManager(cm);
}

}

+ 146
- 0
bubble-server/src/main/java/bubble/client/BubbleNodeClient.java View File

@@ -0,0 +1,146 @@
package bubble.client;

import bubble.dao.cloud.BubbleNodeKeyDAO;
import bubble.model.cloud.BubbleNode;
import bubble.model.cloud.BubbleNodeKey;
import bubble.server.BubbleConfiguration;
import lombok.extern.slf4j.Slf4j;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.entity.ContentType;
import org.cobbzilla.util.http.ApiConnectionInfo;
import org.cobbzilla.util.http.HttpRequestBean;
import org.cobbzilla.util.http.URIUtil;
import org.cobbzilla.wizard.server.config.HttpConfiguration;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static bubble.ApiConstants.isHttpsPort;
import static bubble.server.BubbleServer.getRestoreKey;
import static bubble.server.BubbleServer.isRestoreMode;
import static org.cobbzilla.util.daemon.ZillaRuntime.die;
import static org.cobbzilla.util.daemon.ZillaRuntime.notSupported;
import static org.cobbzilla.util.json.JsonUtil.COMPACT_MAPPER;
import static org.cobbzilla.util.json.JsonUtil.json;

@Slf4j
public class BubbleNodeClient extends BubbleApiClient {

public static final String H_BUBBLE_FROM_NODE_UUID = "X-Bubble-From-Node-UUID";
public static final String H_BUBBLE_FROM_NODE_KEY = "X-Bubble-From-Node-Key";
public static final String H_BUBBLE_TO_NODE_KEY = "X-Bubble-To-Node-Key";
public static final String H_BUBBLE_RESTORE_KEY = "X-Bubble-Restore-Key";

private BubbleNode fromNode;
private BubbleNodeKey fromKey;
private BubbleNode toNode;
private BubbleNodeKey toKey;
private BubbleApiClient alternate;
private boolean useAlternate = false;

public BubbleNodeClient(BubbleNode toNode, BubbleConfiguration configuration) {
// use http if connection is to localhost
super(new ApiConnectionInfo(baseUri(toNode, configuration)));
initKeys(toNode, configuration);
alternate = getAlternate(toNode, configuration);
}

// constructor for the alternate (fallback) client; it gets no further fallback of its own
public BubbleNodeClient(BubbleNode toNode, BubbleConfiguration configuration, boolean alternate) {
super(new ApiConnectionInfo(baseUri(toNode, configuration)));
initKeys(toNode, configuration);
this.alternate = null;
}

public void initKeys(BubbleNode toNode, BubbleConfiguration configuration) {
final BubbleNodeKeyDAO nodeKeyDAO = configuration.getBean(BubbleNodeKeyDAO.class);

toKey = nodeKeyDAO.findFirstByNode(toNode.getUuid());
if (toKey == null) die("initKeys: no toKey found for toNode: "+ toNode.getUuid());

fromNode = configuration.getThisNode();

// ensure we have at least one valid key so others can talk to us
final List<BubbleNodeKey> keys = nodeKeyDAO.findByNode(fromNode.getUuid());
if (BubbleNodeKey.shouldGenerateNewKey(keys)) {
fromKey = nodeKeyDAO.create(new BubbleNodeKey(fromNode));
} else {
fromKey = keys.get(0);
}

this.toNode = toNode;
}

public BubbleNodeClient getAlternate(BubbleNode node, BubbleConfiguration configuration) {
return new BubbleNodeClient(node, configuration, true);
}

private static String baseUri(BubbleNode node, BubbleConfiguration configuration) {
final HttpConfiguration http = configuration.getHttp();

if (node.getUuid().equals(configuration.getThisNode().getUuid())) {
return "http://127.0.0.1:"+ http.getPort()+ http.getBaseUri();
}
return (isHttpsPort(node.getSslPort()) ? "https://" : "http://")
+ node.getFqdn() + ":" + node.getSslPort() + http.getBaseUri();
}

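// request bodies are encrypted with this node's key for the target node's RSA key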
@Override protected <T> void setRequestEntity(HttpEntityEnclosingRequest entityRequest, T data, ContentType contentType) {
if (data instanceof InputStream) notSupported("setRequestEntity: InputStream not supported");
super.setRequestEntity(entityRequest, json(fromKey.encrypt(data.toString(), toKey.getRsaKey())), contentType);
}

@Override protected String getJson(HttpRequestBean requestBean) throws Exception {
return notSupported("setRequestEntity: getJson not supported");
}

@Override public Map<String, String> getHeaders() {
Map<String, String> headers = super.getHeaders();
if (headers == null) headers = new HashMap<>();
headers.put(H_BUBBLE_FROM_NODE_UUID, fromNode.getUuid());
headers.put(H_BUBBLE_FROM_NODE_KEY, fromKey.getUuid());
headers.put(H_BUBBLE_TO_NODE_KEY, toKey.getUuid());
if (isRestoreMode()) {
log.debug("getHeaders: adding restore key: "+getRestoreKey());
headers.put(H_BUBBLE_RESTORE_KEY, getRestoreKey());
}
log.debug("getHeaders: returning "+json(headers, COMPACT_MAPPER));
return headers;
}

@Override public HttpResponse execute(HttpClient client, HttpRequestBase request) throws IOException {
if (useAlternate) {
log.info("execute: useAlternate true, using alternate...");
return alternate.execute(client, request);
}
try {
log.debug("execute: attempting request...");
return super.execute(client, request);
} catch (Exception e) {
log.info("execute: error: "+e);
if (alternate == null) throw e;

final String uri = (isHttpsPort(toNode.getSslPort()) ? "https://" : "http://")
+ toNode.getIp4() + ":" + toNode.getAdminPort() + URIUtil.getPath(request.getURI().toString());
request.setURI(URI.create(uri));
log.info("execute: api call failed, trying alternate...");
final HttpResponse response = alternate.execute(client, request);
useAlternate = true;
log.info("execute: api call failed, alternate succeeded, will continue using that");
return response;
}
}

@Override public void close() {
super.close();
if (alternate != null) alternate.close();
}

}

+ 38
- 0
bubble-server/src/main/java/bubble/client/BubbleNodeDownloadClient.java View File

@@ -0,0 +1,38 @@
package bubble.client;

import bubble.model.cloud.BubbleNode;
import bubble.server.BubbleConfiguration;

import static java.util.concurrent.TimeUnit.MINUTES;
import static java.util.concurrent.TimeUnit.SECONDS;

public class BubbleNodeDownloadClient extends BubbleNodeClient {

public static final int NUM_TRIES = 10;
public static final int DL_CONNECT_TIMEOUT = (int) SECONDS.toMillis(30);
public static final int DL_SOCKET_TIMEOUT = (int) MINUTES.toMillis(20);
public static final int DL_REQUEST_TIMEOUT = (int) MINUTES.toMillis(20);

public BubbleNodeDownloadClient(BubbleNode node, BubbleConfiguration configuration) {
super(node, configuration);
init();
}

public BubbleNodeDownloadClient(BubbleNode node, BubbleConfiguration configuration, boolean alternate) {
super(node, configuration, alternate);
init();
}

@Override public BubbleNodeClient getAlternate(BubbleNode node, BubbleConfiguration configuration) {
return new BubbleNodeDownloadClient(node, configuration, true);
}

public void init() {
setNumTries(NUM_TRIES);
setConnectTimeout(DL_CONNECT_TIMEOUT);
setSocketTimeout(DL_SOCKET_TIMEOUT);
setRequestTimeout(DL_REQUEST_TIMEOUT);
}

}

