@@ -0,0 +1,9 @@ | |||
# This file loads the proper rgloader/loader.rb file that comes packaged
# with Vagrant so that encoded files can properly run with Vagrant.
embedded_dir = ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]
# Outside the Vagrant installer there is no embedded rgloader, so fail fast.
raise "Encoded files can't be read outside of the Vagrant installer." unless embedded_dir
require File.expand_path("rgloader/loader", embedded_dir)
@@ -0,0 +1,62 @@ | |||
# -*- mode: ruby -*- | |||
# vi: set ft=ruby : | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Bubble Vagrantfile | |||
# ================== | |||
# `vagrant up` will create a full Bubble development environment, and optionally start | |||
# a local launcher. | |||
# | |||
# ## Environment Variables | |||
# | |||
# ### LETSENCRYPT_EMAIL | |||
# If you specify the LETSENCRYPT_EMAIL environment variable, then `vagrant up` will also | |||
# start a Local Launcher (see docs/local-launcher.md) which is your starting point for | |||
# launching new Bubbles. | |||
# | |||
# ### BUBBLE_PORT | |||
# By default, Bubble will listen on port 8090. | |||
# If something else is already using that port on your computer, `vagrant up` will fail. | |||
# Set the `BUBBLE_PORT` environment variable to another port, and Bubble will listen on | |||
# that port instead. | |||
# | |||
# ### BUBBLE_GIT_TAG | |||
# By default, the Vagrant box will run the bleeding edge (`master` branch) of Bubble. | |||
# Set the `BUBBLE_GIT_TAG` environment variable to a git branch or tag that should be | |||
# checked out instead. | |||
# | |||
# | |||
Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/focal64"

  # ENV values are always Strings; coerce to an Integer so Vagrant's port
  # validation accepts BUBBLE_PORT, falling back to 8090 when it is unset.
  host_port = (ENV['BUBBLE_PORT'] || 8090).to_i

  # You can access the launcher on port 8090 (or BUBBLE_PORT) but only on 127.0.0.1
  # If you want to allow outside access to port 8090 (listen on 0.0.0.0), use the version below
  config.vm.network "forwarded_port", guest: 8090, host: host_port, host_ip: "127.0.0.1"
  # Anyone who can reach port 8090 on this system will be able to access the launcher
  # config.vm.network "forwarded_port", guest: 8090, host: host_port

  # Provision the box: clone (or update) the bubble repo, run first-time setup,
  # and -- only when LETSENCRYPT_EMAIL is set -- start a Local Launcher.
  config.vm.provision :shell do |s|
    s.env = {
      LETSENCRYPT_EMAIL: ENV['LETSENCRYPT_EMAIL'],
      GIT_TAG: ENV['BUBBLE_GIT_TAG'] || 'master'
    }
    s.inline = <<-SHELL
      apt-get update -y
      apt-get upgrade -y
      if [[ ! -d bubble ]] ; then
        git clone https://git.bubblev.org/bubblev/bubble.git
      fi
      cd bubble
      # Quote GIT_TAG in case a branch/tag name contains shell metacharacters
      git fetch && git pull origin "${GIT_TAG}"
      ./bin/first_time_ubuntu.sh
      ./bin/first_time_setup.sh
      if [[ -n "${LETSENCRYPT_EMAIL}" ]] ; then
        echo "export LETSENCRYPT_EMAIL=${LETSENCRYPT_EMAIL}" > bubble.env
        ./bin/run.sh bubble.env
      fi
    SHELL
  end
end
@@ -0,0 +1,3 @@ | |||
#!/bin/bash
# Pre-fetch the ubuntu/focal64 base box used by the Vagrantfile, so that
# `vagrant up` does not need to download it on first run.
vagrant box add ubuntu/focal64
@@ -38,7 +38,7 @@ fi | |||
git submodule update --init --recursive || die "Error in git submodule update" | |||
pushd utils/cobbzilla-parent || die "Error pushing utils/cobbzilla-parent directory" | |||
mvn install || die "Error installing cobbzilla-parent" | |||
mvn -q install || die "Error installing cobbzilla-parent" | |||
popd || die "Error popping back from utils/cobbzilla-parent" | |||
UTIL_REPOS=" | |||
@@ -49,20 +49,21 @@ cobbzilla-wizard | |||
abp-parser | |||
" | |||
pushd utils || die "Error pushing utils directory" | |||
MVN_QUIET="-q -DskipTests=true -Dcheckstyle.skip=true" | |||
for repo in ${UTIL_REPOS}; do | |||
pushd "${repo}" && mvn -DskipTests=true -Dcheckstyle.skip=true clean install && popd || die "Error installing ${repo}" | |||
pushd "${repo}" && mvn ${MVN_QUIET} clean install && popd || die "Error installing ${repo}" | |||
done | |||
popd || die "Error popping back from utils directory" | |||
if [[ -z "${BUBBLE_SETUP_MODE}" || "${BUBBLE_SETUP_MODE}" == "web" ]] ; then | |||
INSTALL_WEB=web mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar" | |||
INSTALL_WEB=web mvn ${MVN_QUIET} -Pproduction clean package || die "Error building bubble jar" | |||
elif [[ "${BUBBLE_SETUP_MODE}" == "debug" ]] ; then | |||
DEBUG_BUILD=debug mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar" | |||
DEBUG_BUILD=debug mvn ${MVN_QUIET} -Pproduction clean package || die "Error building bubble jar" | |||
elif [[ "${BUBBLE_SETUP_MODE}" == "production" ]] ; then | |||
BUBBLE_PRODUCTION=1 mvn -DskipTests=true -Dcheckstyle.skip=true -Pproduction clean package || die "Error building bubble jar" | |||
BUBBLE_PRODUCTION=1 mvn -DskipTests=true -Dcheckstyle.skip=true -Pproduction-full package || die "Error building bubble full jar" | |||
BUBBLE_PRODUCTION=1 mvn ${MVN_QUIET} -Pproduction clean package || die "Error building bubble jar" | |||
BUBBLE_PRODUCTION=1 mvn ${MVN_QUIET} -Pproduction-full package || die "Error building bubble full jar" | |||
else | |||
die "env var BUBBLE_SETUP_MODE was invalid: ${BUBBLE_SETUP_MODE}" | |||
@@ -45,18 +45,19 @@ cobbzilla-wizard | |||
abp-parser | |||
" | |||
pushd utils | |||
MVN_QUIET="-q -DskipTests=true -Dcheckstyle.skip=true" | |||
for repo in ${UTIL_REPOS} ; do | |||
if [[ ${FAST} -eq 1 ]] ; then | |||
pushd ${repo} && mvn -DskipTests=true -Dcheckstyle.skip=true install && popd || die "Error installing ${repo}" | |||
pushd ${repo} && mvn ${MVN_QUIET} install && popd || die "Error installing ${repo}" | |||
else | |||
pushd ${repo} && mvn -DskipTests=true -Dcheckstyle.skip=true clean install && popd || die "Error installing ${repo}" | |||
pushd ${repo} && mvn ${MVN_QUIET} clean install && popd || die "Error installing ${repo}" | |||
fi | |||
done | |||
popd | |||
if [[ ${FAST} -eq 1 ]] ; then | |||
mvn -DskipTests=true -Dcheckstyle.skip=true clean package || die "Error building bubble jar" | |||
mvn ${MVN_QUIET} clean package || die "Error building bubble jar" | |||
else | |||
BUBBLE_PRODUCTION=1 mvn -DskipTests=true -Dcheckstyle.skip=true -Pproduction clean package || die "Error building bubble jar" | |||
BUBBLE_PRODUCTION=1 mvn -DskipTests=true -Dcheckstyle.skip=true -Pproduction-full package || die "Error building bubble full jar" | |||
BUBBLE_PRODUCTION=1 mvn ${MVN_QUIET} -Pproduction clean package || die "Error building bubble jar" | |||
BUBBLE_PRODUCTION=1 mvn ${MVN_QUIET} -Pproduction-full package || die "Error building bubble full jar" | |||
fi |
@@ -11,12 +11,12 @@ | |||
<groupId>bubble</groupId> | |||
<artifactId>bubble</artifactId> | |||
<!-- @@BUBBLE_VERSION@@ this comment must remain above the version tag so that _set_version can update it --> | |||
<version>1.4.52</version> | |||
<version>1.4.55</version> | |||
</parent> | |||
<artifactId>bubble-server</artifactId> | |||
<!-- @@BUBBLE_VERSION@@ this comment must remain above the version tag so that _set_version can update it --> | |||
<version>1.4.52</version> | |||
<version>1.4.55</version> | |||
<repositories> | |||
<repository> | |||
@@ -10,12 +10,15 @@ import lombok.Getter; | |||
import lombok.Setter; | |||
import org.cobbzilla.util.collection.NameAndValue; | |||
import static bubble.cloud.compute.ComputeDeploymentConfig.DEFAULT_DEPLOYMENT; | |||
public class ComputeConfig extends RegionalConfig { | |||
@Getter @Setter private ComputeNodeSize[] sizes; | |||
@Getter @Setter private String os; | |||
@Getter @Setter private PackerConfig packer; | |||
@Getter @Setter private NameAndValue[] config; | |||
@Getter @Setter private ComputeDeploymentConfig deployment = DEFAULT_DEPLOYMENT; | |||
public CloudRegion getRegion (String name) { | |||
for (CloudRegion r : getRegions()) { | |||
@@ -0,0 +1,13 @@ | |||
package bubble.cloud.compute;

import lombok.Getter;
import lombok.Setter;

// Deployment flags for a compute cloud; exposed to launch/packer template
// contexts via ComputeServiceDriver.addLaunchContext(). Defaults describe a
// normal full-VM deployment.
public class ComputeDeploymentConfig {

    // Shared default used when a cloud's JSON config supplies no "deployment" object.
    public static final ComputeDeploymentConfig DEFAULT_DEPLOYMENT = new ComputeDeploymentConfig();

    // When false, deployment must not rely on sudo (the docker cloud configs in
    // this change set declare "sudo": false). NOTE(review): semantics inferred
    // from those configs -- confirm against the ansible roles that consume it.
    @Getter @Setter private boolean sudo = true;

    // When false, the node's hostname is left unchanged (the common role's
    // hostname task is guarded by: when bubble_set_hostname == 'true').
    @Getter @Setter private boolean hostname = true;

}
@@ -69,4 +69,7 @@ public interface ComputeServiceDriver extends CloudServiceDriver, RegionalServic | |||
default void prepPackerDir(TempDir tempDir) {} | |||
default void addLaunchContext(Map<String, Object> ctx) { addLaunchContext(ctx, ""); } | |||
default void addLaunchContext(Map<String, Object> ctx, String prefix) {} | |||
} |
@@ -27,6 +27,7 @@ import static java.util.concurrent.TimeUnit.MINUTES; | |||
import static java.util.concurrent.TimeUnit.SECONDS; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.die; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.now; | |||
import static org.cobbzilla.util.reflect.ReflectionUtil.toMap; | |||
import static org.cobbzilla.util.system.Sleep.sleep; | |||
@Slf4j | |||
@@ -127,6 +128,10 @@ public abstract class ComputeServiceDriverBase | |||
.findAny().orElse(null); | |||
} | |||
@Override public void addLaunchContext(Map<String, Object> ctx, String prefix) { | |||
ctx.putAll(toMap(getConfig().getDeployment(), prefix)); | |||
} | |||
public PackerImage getOrCreatePackerImage(BubbleNode node) { | |||
PackerImage packerImage = getPackerImage(node.getInstallType(), node.getRegion()); | |||
if (packerImage == null) { | |||
@@ -23,7 +23,4 @@ public class PackerConfig { | |||
@Getter @Setter private JsonNode post; | |||
public boolean hasPost () { return post != null; } | |||
@Getter @Setter private Boolean sudo; | |||
public boolean sudo () { return sudo == null || sudo; } | |||
} |
@@ -25,17 +25,14 @@ import org.cobbzilla.util.io.TempDir; | |||
import java.io.File; | |||
import java.io.IOException; | |||
import java.util.ArrayList; | |||
import java.util.Arrays; | |||
import java.util.List; | |||
import java.util.Map; | |||
import java.util.*; | |||
import java.util.concurrent.ConcurrentHashMap; | |||
import java.util.function.Predicate; | |||
import java.util.stream.Collectors; | |||
import static bubble.service.packer.PackerJob.PACKER_IMAGE_PREFIX; | |||
import static com.github.dockerjava.api.model.InternetProtocol.UDP; | |||
import static java.lang.Boolean.parseBoolean; | |||
import static java.lang.Integer.parseInt; | |||
import static java.util.Collections.emptyList; | |||
import static java.util.Collections.singletonList; | |||
import static java.util.concurrent.TimeUnit.SECONDS; | |||
@@ -63,7 +60,7 @@ public class DockerComputeDriver extends ComputeServiceDriverBase { | |||
public static final Map<String, ComputeNodeSize> NODE_SIZE_MAP = MapBuilder.build(LOCAL, LOCAL_SIZE); | |||
public static final ExposedPort[] SAGE_EXPOSED_PORTS = { | |||
new ExposedPort(22), new ExposedPort(80), new ExposedPort(443), new ExposedPort(1202) | |||
new ExposedPort(22), new ExposedPort(80), new ExposedPort(443) | |||
}; | |||
public static final ExposedPort[] NODE_EXPOSED_PORTS = ArrayUtil.append(SAGE_EXPOSED_PORTS, | |||
new ExposedPort(1080), new ExposedPort(1443), | |||
@@ -79,9 +76,9 @@ public class DockerComputeDriver extends ComputeServiceDriverBase { | |||
public static final String BASE_IMAGE = "phusion/baseimage:focal-1.0.0alpha1-amd64"; | |||
public static final List<OsImage> CLOUD_OS_IMAGES = Arrays.asList(new OsImage[]{ | |||
public static final List<OsImage> CLOUD_OS_IMAGES = singletonList( | |||
new OsImage().setName(BASE_IMAGE).setId(BASE_IMAGE).setRegion(LOCAL) | |||
}); | |||
); | |||
public static final long START_TIMEOUT = SECONDS.toMillis(120); | |||
public static final String DEFAULT_HOST = "unix:///var/run/docker.sock"; | |||
@@ -106,10 +103,10 @@ public class DockerComputeDriver extends ComputeServiceDriverBase { | |||
@Override public String getPackerImageId(String name, PackerBuild packerBuild) { return name; } | |||
private final Map<String, Map<Integer, Integer>> portMappings = new ConcurrentHashMap(); | |||
private final Map<String, Map<Integer, Integer>> portMappings = new ConcurrentHashMap<>(); | |||
@Override public int getSshPort(BubbleNode node) { | |||
return portMappings.get(node.getUuid()).get(1202); | |||
return portMappings.get(node.getUuid()).get(22); | |||
} | |||
@Getter(lazy=true) private final DockerClient dockerClient = initDockerClient(); | |||
@@ -176,27 +173,43 @@ public class DockerComputeDriver extends ComputeServiceDriverBase { | |||
{LABEL_NODE, node.getUuid()} | |||
})) | |||
.withHostConfig(HostConfig.newHostConfig() | |||
.withPublishAllPorts(true) | |||
.withCapAdd(Capability.NET_ADMIN) | |||
.withCapAdd(Capability.SYS_MODULE) | |||
.withCapAdd(Capability.SYS_ADMIN)); | |||
dc.startContainerCmd(ccr.exec().getId()).exec(); | |||
final long start = now(); | |||
final Predicate<? super BubbleNode> nodeFilter = filterForNode(node); | |||
while (listNodes().stream().noneMatch(nodeFilter)) { | |||
if (now() - start > START_TIMEOUT) { | |||
return die("start("+node.id()+"): timeout"); | |||
String containerId = null; | |||
while (now() - start <= START_TIMEOUT) { | |||
if (containerId == null) { | |||
containerId = lookupContainer(node); | |||
} else { | |||
final InspectContainerResponse status = dc.inspectContainerCmd(containerId).exec(); | |||
final Boolean running = status.getState().getRunning(); | |||
if (running == null || !running) return die("start(" + node.id() + "): not found but not running"); | |||
final NetworkSettings networkSettings = status.getNetworkSettings(); | |||
if (networkSettings != null) { | |||
final Ports ports = networkSettings.getPorts(); | |||
if (ports != null) { | |||
final Map<ExposedPort, Ports.Binding[]> bindings = ports.getBindings(); | |||
if (bindings != null) { | |||
final Map<Integer, Integer> portMap = new HashMap<>(); | |||
for (Map.Entry<ExposedPort, Ports.Binding[]> entry : bindings.entrySet()) { | |||
final ExposedPort exp = entry.getKey(); | |||
final Ports.Binding[] b = entry.getValue(); | |||
portMap.put(exp.getPort(), parseInt(b[0].getHostPortSpec())); | |||
} | |||
portMappings.put(node.getUuid(), portMap); | |||
return node.setState(BubbleNodeState.running).setIp4("127.0.0.1").setIp6("fd00::1"); | |||
} | |||
} | |||
} | |||
} | |||
sleep(SECONDS.toMillis(5), "waiting for docker container to be running"); | |||
} | |||
final String containerId = lookupContainer(node); | |||
if (containerId == null) return die("start("+node.id()+"): not not found after starting"); | |||
final InspectContainerResponse status = dc.inspectContainerCmd(containerId).exec(); | |||
return node.setIp4("127.0.0.1").setIp6("fd00::1"); | |||
} | |||
private Predicate<? super BubbleNode> filterForNode(BubbleNode node) { | |||
return n -> n.isRunning() && n.getUuid().equals(node.getUuid()); | |||
return die("start("+node.id()+"): timeout"); | |||
} | |||
private String lookupContainer(BubbleNode node) { | |||
@@ -223,6 +236,7 @@ public class DockerComputeDriver extends ComputeServiceDriverBase { | |||
return node; | |||
} | |||
dc.stopContainerCmd(containerId).exec(); | |||
portMappings.remove(node.getUuid()); | |||
return node; | |||
} | |||
@@ -94,6 +94,7 @@ public class AnsiblePrepService { | |||
ctx.put("publicBaseUri", network.getPublicUri()); | |||
ctx.put("support", configuration.getSupport()); | |||
ctx.put("appLinks", configuration.getAppLinks()); | |||
computeDriver.addLaunchContext(ctx, "bubble_deploy_"); | |||
if (shouldEnableOpenApi(installType, nodeSize)) { | |||
ctx.put("openapi_contact_email", configuration.getOpenApi().getContactEmail()); | |||
@@ -114,14 +115,9 @@ public class AnsiblePrepService { | |||
ctx.put("testMode", !fork && configuration.testMode()); | |||
// Determine which apps should be copied based on plan | |||
final List<BubblePlanApp> planApps; | |||
if (configuration.paymentsEnabled()) { | |||
final AccountPlan accountPlan = accountPlanDAO.findByAccountAndNetwork(account.getUuid(), network.getUuid()); | |||
if (accountPlan == null) return die("prepAnsible: no AccountPlan found for network: "+network.getUuid()); | |||
planApps = planAppDAO.findByPlan(accountPlan.getPlan()); | |||
} else { | |||
planApps = null; | |||
} | |||
final AccountPlan accountPlan = accountPlanDAO.findByAccountAndNetwork(account.getUuid(), network.getUuid()); | |||
if (accountPlan == null) return die("prepAnsible: no AccountPlan found for network: "+network.getUuid()); | |||
final List<BubblePlanApp> planApps = planAppDAO.findByPlan(accountPlan.getPlan()); | |||
// Copy database with new encryption key | |||
final String key = dbFilter.copyDatabase(fork, launchType, network, node, account, planApps, new File(bubbleFilesDir, "bubble.sql.gz")); | |||
@@ -331,7 +331,7 @@ public class StandardNetworkService implements NetworkService { | |||
log.info("newNode: running script:\n"+script); | |||
for (int i=0; i<MAX_ANSIBLE_TRIES; i++) { | |||
sleep((i+1) * SECONDS.toMillis(5), "waiting to try ansible setup"); | |||
sleep((i+1) * SECONDS.toMillis(3), "waiting to try ansible setup"); | |||
        // Use .uncloseable() because if the command fails due to connection timeout/refused, | |||
// the output stream is closed; if a retry succeeds, there's no output to the progressMeter | |||
final CommandResult result = ansibleSetup(script, progressMeter.uncloseable()); | |||
@@ -46,14 +46,16 @@ public abstract class EntityIterator implements Iterator<Identifiable> { | |||
@Getter private final Thread thread; | |||
@Getter private final AtomicReference<Exception> error; | |||
@Getter private final boolean paymentsEnabled; | |||
private List<BubbleApp> userApps; | |||
private final Map<CloudServiceType, CloudService> noopClouds = new HashMap<>(); | |||
private final AtomicBoolean iterating = new AtomicBoolean(false); | |||
public boolean iterating () { return iterating.get(); } | |||
public EntityIterator(AtomicReference<Exception> error) { | |||
public EntityIterator(AtomicReference<Exception> error, boolean paymentsEnabled) { | |||
this.error = error; | |||
this.paymentsEnabled = paymentsEnabled; | |||
this.thread = background(this::_iterate, "EntityIterator", this.error::set); | |||
} | |||
@@ -184,7 +186,7 @@ public abstract class EntityIterator implements Iterator<Identifiable> { | |||
// clear out payment information, set driver to noop | |||
final CloudService noopCloud = noopClouds.get(CloudServiceType.payment); | |||
if (noopCloud == null) { | |||
die("addEntities: "+NOOP_CLOUD+" for payment cloud type not found"); | |||
if (paymentsEnabled) die("addEntities: "+NOOP_CLOUD+" for payment cloud type not found"); | |||
} else { | |||
entities.forEach(e -> { | |||
final AccountPaymentMethod apm = (AccountPaymentMethod) e; | |||
@@ -62,7 +62,7 @@ public class FilteredEntityIterator extends EntityIterator { | |||
BubbleNode node, | |||
List<BubblePlanApp> planApps, | |||
AtomicReference<Exception> error) { | |||
super(error); | |||
super(error, configuration.paymentsEnabled()); | |||
this.configuration = configuration; | |||
this.account = account; | |||
this.network = network; | |||
@@ -21,18 +21,18 @@ import static org.cobbzilla.wizard.dao.AbstractCRUDDAO.ORDER_CTIME_ASC; | |||
@Slf4j | |||
public class FullEntityIterator extends EntityIterator { | |||
private final BubbleConfiguration config; | |||
private final BubbleConfiguration configuration; | |||
private final Account account; | |||
private final BubbleNetwork network; | |||
private final LaunchType launchType; | |||
public FullEntityIterator (BubbleConfiguration config, | |||
public FullEntityIterator (BubbleConfiguration configuration, | |||
Account account, | |||
BubbleNetwork network, | |||
LaunchType launchType, | |||
AtomicReference<Exception> error) { | |||
super(error); | |||
this.config = config; | |||
super(error, configuration.paymentsEnabled()); | |||
this.configuration = configuration; | |||
this.network = network; | |||
this.account = account; | |||
this.launchType = launchType; | |||
@@ -41,15 +41,15 @@ public class FullEntityIterator extends EntityIterator { | |||
protected void iterate() { | |||
final String prefix = "iterate(" + (network == null ? "no-network" : network.getUuid()) + "): "; | |||
try { | |||
config.getEntityClasses() | |||
.forEach(c -> addEntities(true, c, config.getDaoForEntityClass(c).findAll(ORDER_CTIME_ASC), | |||
configuration.getEntityClasses() | |||
.forEach(c -> addEntities(true, c, configuration.getDaoForEntityClass(c).findAll(ORDER_CTIME_ASC), | |||
network, null, null)); | |||
if (account != null && network != null && launchType != null && launchType == LaunchType.fork_node) { | |||
// add an initial device so that algo starts properly the first time | |||
// name and totp key will be overwritten when the device is initialized for use | |||
log.info(prefix+"creating a single dummy device for algo to start properly"); | |||
final var initDevice = newUninitializedDevice(network.getUuid(), account.getUuid()); | |||
add(config.getBean(DeviceDAO.class).create(initDevice)); | |||
add(configuration.getBean(DeviceDAO.class).create(initDevice)); | |||
} | |||
log.debug(prefix+"completed"); | |||
@@ -83,7 +83,6 @@ public class PackerJob implements Callable<List<PackerImage>> { | |||
public static final String IMAGE_REGIONS_VAR = "imageRegions"; | |||
public static final String BUILDERS_VAR = "builders"; | |||
public static final String POST_PROCESSOR_VAR = "postProcessor"; | |||
public static final String SUDO_VAR = "sudo"; | |||
public static final String PACKER_PLAYBOOK_TEMPLATE = "packer-playbook.yml.hbs"; | |||
public static final String PACKER_PLAYBOOK = "packer-playbook.yml"; | |||
public static final String PACKER_BINARY = System.getProperty("user.home")+"/packer/packer"; | |||
@@ -290,7 +289,9 @@ public class PackerJob implements Callable<List<PackerImage>> { | |||
builderJsons.add(generateBuilder(packerConfig, ctx)); | |||
} | |||
ctx.put(BUILDERS_VAR, builderJsons); | |||
ctx.put(SUDO_VAR, packerConfig.sudo()); | |||
// allow compute driver to add config deployment context vars | |||
computeDriver.addLaunchContext(ctx); | |||
if (packerConfig.hasPost()) ctx.put(POST_PROCESSOR_VAR, generatePostProcessor(packerConfig, ctx)); | |||
@@ -1,3 +1,3 @@ | |||
# Do not edit this file directly | |||
# Use _set_version to update the Bubble version in all files | |||
bubble.version=Adventure 1.4.52 | |||
bubble.version=Adventure 1.4.55 |
@@ -6,9 +6,11 @@ LOG=/var/log/bubble/ansible.log | |||
# Stop unattended upgrades so that apt installs will work | |||
# unattended upgrades are re-enabled at the end of the ansible run | |||
systemctl stop unattended-upgrades | |||
UNATTENDED_UPGRADES_DISABLED=20auto-upgrades-disabled | |||
cp /usr/share/unattended-upgrades/${UNATTENDED_UPGRADES_DISABLED} /etc/apt/apt.conf.d/${UNATTENDED_UPGRADES_DISABLED} | |||
UNATTENDED_UPGRADES_DISABLED=/usr/share/unattended-upgrades/20auto-upgrades-disabled | |||
if [[ -f ${UNATTENDED_UPGRADES_DISABLED} ]] ; then | |||
systemctl stop unattended-upgrades || exit 1 | |||
cp ${UNATTENDED_UPGRADES_DISABLED} /etc/apt/apt.conf.d/$(basename ${UNATTENDED_UPGRADES_DISABLED}) || exit 1 | |||
fi | |||
# Enable job control. Allows us to start creating dhparam in the background right now. | |||
{{#if isNode}}# For node, also allows us to install AlgoVPN in the background.{{/if}} | |||
@@ -35,7 +37,7 @@ function kill_bg_jobs { | |||
} | |||
function log { | |||
echo "${1}" >> ${LOG} | |||
echo "${1}" | tee -a /tmp/$(basename ${LOG}).saved >> ${LOG} | |||
} | |||
function die { | |||
@@ -106,7 +108,8 @@ fi | |||
kill_bg_jobs | |||
# ansible should have already restarted unattended-upgrades, but just in case | |||
rm -f /etc/apt/apt.conf.d/${UNATTENDED_UPGRADES_DISABLED} | |||
systemctl restart unattended-upgrades | |||
if [[ -f ${UNATTENDED_UPGRADES_DISABLED} ]] ; then | |||
rm -f /etc/apt/apt.conf.d/${UNATTENDED_UPGRADES_DISABLED} | |||
systemctl restart unattended-upgrades || die "Error running: systemctl restart unattended-upgrades" | |||
fi | |||
exit 0 |
@@ -1,6 +1,7 @@ | |||
{ | |||
"name": "common", | |||
"config": [ | |||
{"name": "hostname", "value": "[[node.fqdn]]"} | |||
{"name": "hostname", "value": "[[node.fqdn]]"}, | |||
{"name": "bubble_set_hostname", "value": "[[bubble_deploy_hostname]]"} | |||
] | |||
} |
@@ -4,6 +4,7 @@ | |||
- name: Set hostname to {{ hostname }} | |||
hostname: | |||
name: '{{ hostname }}' | |||
when: bubble_set_hostname == 'true' | |||
- name: Set log flag to true with EX of 7 days for non-sage nodes | |||
shell: echo 'set bubble.StandardSelfNodeService.bubble_server_logs_enabled "true" EX 604800' | redis-cli | |||
@@ -8375,12 +8375,8 @@ ranis | |||
ranke | |||
ranks | |||
rants | |||
raped | |||
raper | |||
rapes | |||
raphe | |||
rapid | |||
rappe | |||
rared | |||
raree | |||
rarer | |||
@@ -159,9 +159,12 @@ | |||
"regions": [{"name": "local", "internalName": "local"}], | |||
"sizes": [{"name": "local", "type": "local", "internalName": "local"}], | |||
"os": "phusion/baseimage:focal-1.0.0alpha1-amd64", | |||
"deployment": { | |||
"sudo": false, | |||
"hostname": false | |||
}, | |||
"packer": { | |||
"vars": [], | |||
"sudo": false, | |||
"builder": { | |||
"type": "docker", | |||
"image": "<<os.name>>", | |||
@@ -1,9 +1,10 @@ | |||
- name: Install packages missing on docker ubuntu | |||
apt: | |||
name: [ 'curl', 'nginx', 'cron', 'iptables', 'redis', 'postgresql', 'supervisor' ] | |||
name: [ 'curl', 'rsync', 'nginx', 'cron', 'iptables', 'redis', 'postgresql', 'supervisor' ] | |||
state: present | |||
update_cache: yes | |||
# phusion daemon documentation: https://github.com/phusion/baseimage-docker#adding-additional-daemons | |||
- name: Ensure /service/ dirs exists | |||
file: | |||
path: "/service/{{ item }}" | |||
@@ -58,3 +59,7 @@ | |||
owner: root | |||
group: root | |||
mode: 0755 | |||
# documented here: https://github.com/phusion/baseimage-docker#enabling-ssh | |||
- name: Enable sshd | |||
shell: rm -f /etc/service/sshd/down |
@@ -109,7 +109,7 @@ | |||
owner: root | |||
group: root | |||
mode: 0400 | |||
when: packer_builder_type != 'docker' and fw_enable_ssh | |||
when: fw_enable_ssh | |||
- name: Install SSH fail2ban settings | |||
copy: | |||
@@ -126,9 +126,12 @@ | |||
"regions": [{"name": "local", "internalName": "local"}], | |||
"sizes": [{"name": "local", "type": "local", "internalName": "local"}], | |||
"os": "phusion/baseimage:focal-1.0.0alpha1-amd64", | |||
"deployment": { | |||
"sudo": false, | |||
"hostname": false | |||
}, | |||
"packer": { | |||
"vars": [], | |||
"sudo": false, | |||
"builder": { | |||
"type": "docker", | |||
"image": "<<os.name>>", | |||
@@ -28,11 +28,11 @@ For Mac OS X systems, run: | |||
The important things to install: | |||
* Java 11 | |||
* Maven 3 | |||
* PostgreSQL 12 | |||
* PostgreSQL 10+ (12+ preferred) | |||
* Redis | |||
* Python 3 | |||
* Packer | |||
* Required tools: curl, jq, uuid, sha256sum, openssl, ssh, scp, rsync, npm, webpack, unzip | |||
* Python 3.8+ | |||
* Packer (try `bin/install_packer.sh` first, it might work fine) | |||
* Required tools: curl, jq, uuid, sha256sum, openssl, ssh, scp, rsync, npm, webpack, zip, unzip | |||
Look at the `first_time_ubuntu.sh` script and ensure you've basically done all that it does, | |||
including creating PostgreSQL users/databases. | |||
@@ -14,7 +14,7 @@ | |||
<groupId>bubble</groupId> | |||
<artifactId>bubble</artifactId> | |||
<!-- @@BUBBLE_VERSION@@ this comment must remain above the version tag so that _set_version can update it --> | |||
<version>1.4.52</version> | |||
<version>1.4.55</version> | |||
<packaging>pom</packaging> | |||
<licenses> | |||
@@ -1 +1 @@ | |||
Subproject commit 8d2c6f1e9b4bac49688508bf3f9ac584a5b86619 | |||
Subproject commit e0faad31cb5cadfc000277d3f2052a57cb441b2d |
@@ -10,7 +10,7 @@ This code is available under the GNU Affero General Public License, version 3: h | |||
<groupId>bubble</groupId> | |||
<artifactId>utils</artifactId> | |||
<!-- @@BUBBLE_VERSION@@ this comment must remain above the version tag so that _set_version can update it --> | |||
<version>1.4.52</version> | |||
<version>1.4.55</version> | |||
<packaging>pom</packaging> | |||
<licenses> | |||