@@ -174,24 +174,26 @@ public class VultrDriver extends ComputeServiceDriverBase { | |||
&& serverNode.has("server_state") | |||
&& serverNode.has(VULTR_V4_IP)) { | |||
final String serverState = serverNode.get("server_state").textValue(); | |||
if (!serverState.equals("ok")) continue; | |||
final String status = serverNode.get("status").textValue(); | |||
final String serverState = serverNode.get("server_state").textValue(); | |||
final String ip4 = serverNode.get(VULTR_V4_IP).textValue(); | |||
final String ip6 = serverNode.get(VULTR_V6_IP).textValue(); | |||
// log.info("start: server_state="+serverState+", status="+status, "ip4="+ip4+", ip6="+ip6); | |||
if (ip4 != null && ip4.length() > 0 && !ip4.equals("0.0.0.0")) { | |||
node.setIp4(ip4); | |||
nodeDAO.update(node); | |||
} | |||
final String ip6 = serverNode.get(VULTR_V6_IP).textValue(); | |||
if (ip6 != null && ip6.length() > 0) { | |||
node.setIp6(ip6); | |||
nodeDAO.update(node); | |||
} | |||
if (status.equals("active") && (node.hasIp4() || node.hasIp6())) { | |||
log.info("start: node is active! we can run ansible now: " + node.getIp4()); | |||
node.setState(BubbleNodeState.booted); | |||
nodeDAO.update(node); | |||
} | |||
if (serverState.equals("ok")) { | |||
log.info("start: server is ready: "+node.id()); | |||
startedOk = true; | |||
break; | |||
} | |||
@@ -31,9 +31,11 @@ import org.hibernate.annotations.Type; | |||
import javax.persistence.*; | |||
import java.io.File; | |||
import java.util.*; | |||
import java.util.concurrent.TimeUnit; | |||
import static bubble.ApiConstants.EP_NODES; | |||
import static bubble.model.cloud.BubbleNodeState.*; | |||
import static java.util.concurrent.TimeUnit.MINUTES; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.*; | |||
import static org.cobbzilla.util.io.FileUtil.abs; | |||
import static org.cobbzilla.util.json.JsonUtil.fromJson; | |||
@@ -41,6 +43,7 @@ import static org.cobbzilla.util.network.NetworkUtil.isLocalIpv4; | |||
import static org.cobbzilla.util.reflect.ReflectionUtil.copy; | |||
import static org.cobbzilla.util.string.ValidationRegexes.IP4_MAXLEN; | |||
import static org.cobbzilla.util.string.ValidationRegexes.IP6_MAXLEN; | |||
import static org.cobbzilla.util.system.Sleep.sleep; | |||
import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENCRYPTED_STRING; | |||
import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENC_PAD; | |||
import static org.cobbzilla.wizard.model.entityconfig.annotations.ECForeignKeySearchDepth.shallow; | |||
@@ -56,6 +59,7 @@ public class BubbleNode extends IdentifiableBase implements HasNetwork, HasBubbl | |||
public static final String TAG_TEST = "test_instance"; | |||
private static final List<String> TAG_NAMES = Arrays.asList(TAG_INSTANCE_ID, TAG_ERROR); | |||
private static final long IP_ADDR_TIMEOUT = MINUTES.toMillis(2); | |||
@Override public Collection<String> validTags() { return TAG_NAMES; } | |||
@@ -243,4 +247,11 @@ public class BubbleNode extends IdentifiableBase implements HasNetwork, HasBubbl | |||
return new BubbleNodeQuickClient(this, configuration); | |||
} | |||
public void waitForIpAddresses() { | |||
final long start = now(); | |||
while ((!hasIp4() || !hasIp6()) && now() - start < IP_ADDR_TIMEOUT) { | |||
sleep(TimeUnit.SECONDS.toMillis(2), "waiting for node to have IP addresses"); | |||
} | |||
if (!hasIp4() || !hasIp6()) die("waitForIpAddresses: timeout"); | |||
} | |||
} |
@@ -11,12 +11,12 @@ import static bubble.ApiConstants.enumFromString; | |||
public enum BubbleNodeState { | |||
created, starting, booting, booted, preparing_install, awaiting_dns, installing, running, | |||
created, starting, booting, booted, preparing_install, installing, running, | |||
stopping, stopped, | |||
unreachable, error_stopping, error_stopped, unknown_error; | |||
public static final BubbleNodeState[] ACTIVE_STATES = { | |||
created, starting, booting, booted, preparing_install, awaiting_dns, installing, running, stopping | |||
created, starting, booting, booted, preparing_install, installing, running, stopping | |||
}; | |||
@JsonCreator public static BubbleNodeState fromString (String v) { return enumFromString(BubbleNodeState.class, v); } | |||
@@ -69,7 +69,7 @@ public class SageHelloService extends SimpleDaemon { | |||
log.info("hello_to_sage: sending hello..."); | |||
final NotificationReceipt receipt = notificationService.notify(sage, hello_to_sage, selfNode); | |||
log.info("hello_to_sage: received reply from sage node: " + json(receipt, COMPACT_MAPPER)); | |||
if (receipt.isSuccess()) { | |||
if (receipt != null && receipt.isSuccess()) { | |||
if (!sageHelloSent.get()) sageHelloSent.set(true); | |||
synchronized (unlockMessage) { | |||
if (unlockMessage.get() != null && !unlockMessage.get().hasUuid()) { | |||
@@ -79,8 +79,10 @@ public class NodeProgressMeter extends PipedOutputStream implements Runnable { | |||
writer.flush(); | |||
} | |||
public boolean hasError () { return error.get(); } | |||
public void error(String line) { | |||
if (error.get()) { | |||
if (hasError()) { | |||
log.warn("error("+line+") ignored, error already set"); | |||
return; | |||
} | |||
@@ -31,9 +31,7 @@ public class NodeProgressMeterConstants { | |||
public static final String METER_TICK_CREATING_NODE = "BUBBLE: CREATING NODE..."; | |||
public static final String METER_TICK_LAUNCHING_NODE = "BUBBLE: LAUNCHING NODE..."; | |||
public static final String METER_TICK_PREPARING_ROLES = "BUBBLE: PREPARING ANSIBLE ROLES..."; | |||
public static final String METER_TICK_WRITING_DNS_RECORDS = "BUBBLE: WRITING DNS RECORDS..."; | |||
public static final String METER_TICK_PREPARING_INSTALL = "BUBBLE: PREPARING INSTALL FILES..."; | |||
public static final String METER_TICK_AWAITING_DNS = "BUBBLE: AWAITING DNS RECORDS..."; | |||
public static final String METER_TICK_STARTING_INSTALL = "BUBBLE: STARTING INSTALLATION..."; | |||
public static final String METER_TICK_COPYING_ANSIBLE = "BUBBLE: COPYING ANSIBLE FILES..."; | |||
public static final String METER_TICK_RUNNING_ANSIBLE = "BUBBLE: RUNNING ANSIBLE PLAYBOOK..."; | |||
@@ -46,10 +44,13 @@ public class NodeProgressMeterConstants { | |||
public static final String METER_ERROR_PLAN_NOT_ENABLED = "BUBBLE-ERROR: PLAN NOT ENABLED"; | |||
public static final String METER_ERROR_PEER_LIMIT_REACHED = "BUBBLE-ERROR: PEER LIMIT REACHED"; | |||
public static final String METER_ERROR_NODE_CLOUD_NOT_FOUND = "BUBBLE-ERROR: NODE CLOUD NOT FOUND"; | |||
public static final String METER_ERROR_STARTING_NODE = "BUBBLE-ERROR: ERROR STARTING NODE"; | |||
public static final String METER_ERROR_DNS = "BUBBLE-ERROR: ERROR SETTING DNS ENTRIES FOR NODE"; | |||
public static final String METER_ERROR_NO_IP = "BUBBLE-ERROR: NODE STARTED BUT HAS NO IP ADDRESS"; | |||
public static final String METER_ERROR_ROLE_VALIDATION_ERRORS = "BUBBLE-ERROR: ROLE VALIDATION FAILED"; | |||
public static final String METER_COMPLETED = "meter_completed"; | |||
public static final String METER_START_OR_DNS_ERROR = "meter_start_or_dns_error"; | |||
public static final String METER_UNKNOWN_ERROR = "meter_unknown_error"; | |||
private static final Map<String, Integer> STANDARD_TICKS = MapBuilder.build(new Object[][] { | |||
@@ -58,9 +59,7 @@ public class NodeProgressMeterConstants { | |||
{METER_TICK_CREATING_NODE, 1}, | |||
{METER_TICK_LAUNCHING_NODE, 1}, | |||
{METER_TICK_PREPARING_ROLES, 2}, | |||
{METER_TICK_WRITING_DNS_RECORDS, 4}, | |||
{METER_TICK_PREPARING_INSTALL, 4}, | |||
{METER_TICK_AWAITING_DNS, 5}, | |||
{METER_TICK_STARTING_INSTALL, 6}, | |||
{METER_TICK_COPYING_ANSIBLE, 7}, | |||
{METER_TICK_RUNNING_ANSIBLE, 15} | |||
@@ -6,7 +6,6 @@ package bubble.service.cloud; | |||
import bubble.cloud.CloudAndRegion; | |||
import bubble.cloud.compute.ComputeServiceDriver; | |||
import bubble.cloud.dns.DnsServiceDriver; | |||
import bubble.dao.account.AccountDAO; | |||
import bubble.dao.account.AccountPolicyDAO; | |||
import bubble.dao.account.AccountSshKeyDAO; | |||
@@ -30,6 +29,9 @@ import bubble.notify.NewNodeNotification; | |||
import bubble.server.BubbleConfiguration; | |||
import bubble.service.backup.RestoreService; | |||
import bubble.service.bill.PromotionService; | |||
import bubble.service.cloud.job.NodeDnsJob; | |||
import bubble.service.cloud.job.NodeJobException; | |||
import bubble.service.cloud.job.NodeStartJob; | |||
import bubble.service.notify.NotificationService; | |||
import bubble.service.packer.PackerService; | |||
import com.github.jknack.handlebars.Handlebars; | |||
@@ -38,6 +40,8 @@ import lombok.Getter; | |||
import lombok.NonNull; | |||
import lombok.extern.slf4j.Slf4j; | |||
import org.apache.commons.exec.CommandLine; | |||
import org.cobbzilla.util.daemon.AwaitResult; | |||
import org.cobbzilla.util.daemon.DaemonThreadFactory; | |||
import org.cobbzilla.util.handlebars.HandlebarsUtil; | |||
import org.cobbzilla.util.io.TempDir; | |||
import org.cobbzilla.util.system.Command; | |||
@@ -57,6 +61,8 @@ import java.io.OutputStream; | |||
import java.util.ArrayList; | |||
import java.util.List; | |||
import java.util.Map; | |||
import java.util.concurrent.ExecutorService; | |||
import java.util.concurrent.Future; | |||
import java.util.concurrent.atomic.AtomicReference; | |||
import static bubble.ApiConstants.*; | |||
@@ -68,9 +74,9 @@ import static bubble.service.boot.StandardSelfNodeService.*; | |||
import static bubble.service.cloud.NodeProgressMeter.getProgressMeterKey; | |||
import static bubble.service.cloud.NodeProgressMeter.getProgressMeterPrefix; | |||
import static bubble.service.cloud.NodeProgressMeterConstants.*; | |||
import static java.util.concurrent.TimeUnit.MINUTES; | |||
import static java.util.concurrent.TimeUnit.SECONDS; | |||
import static java.util.concurrent.TimeUnit.*; | |||
import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; | |||
import static org.cobbzilla.util.daemon.Await.awaitAll; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.*; | |||
import static org.cobbzilla.util.io.FileUtil.*; | |||
import static org.cobbzilla.util.io.StreamUtil.stream2string; | |||
@@ -90,13 +96,13 @@ public class StandardNetworkService implements NetworkService { | |||
public static final String INSTALL_LOCAL_SH = "install_local.sh"; | |||
public static final String INSTALL_LOCAL_TEMPLATE = stream2string(ANSIBLE_DIR + "/" + INSTALL_LOCAL_SH + ".hbs"); | |||
public static final int MAX_ANSIBLE_TRIES = 1; | |||
public static final int MAX_ANSIBLE_TRIES = 3; | |||
public static final int RESTORE_KEY_LEN = 6; | |||
private static final long NET_LOCK_TIMEOUT = MINUTES.toMillis(21); | |||
private static final long NET_DEADLOCK_TIMEOUT = MINUTES.toMillis(20); | |||
private static final long DNS_TIMEOUT = MINUTES.toMillis(60); | |||
private static final long PLAN_ENABLE_TIMEOUT = PURCHASE_DELAY + SECONDS.toMillis(10); | |||
private static final long NODE_START_JOB_TIMEOUT = MINUTES.toMillis(30); | |||
@Autowired private AccountDAO accountDAO; | |||
@Autowired private AccountSshKeyDAO sshKeyDAO; | |||
@@ -219,23 +225,20 @@ public class StandardNetworkService implements NetworkService { | |||
node.setState(BubbleNodeState.starting); | |||
nodeDAO.update(node); | |||
final ExecutorService backgroundJobs = DaemonThreadFactory.fixedPool(3); | |||
final List<Future<?>> jobFutures = new ArrayList<>(); | |||
// Start the cloud compute instance | |||
node.setState(BubbleNodeState.booting); | |||
nodeDAO.update(node); | |||
node = computeDriver.start(node); | |||
node.setState(BubbleNodeState.booted); | |||
nodeDAO.update(node); | |||
final NodeStartJob startJob = new NodeStartJob(this, node, nodeDAO, computeDriver); | |||
jobFutures.add(backgroundJobs.submit(startJob)); | |||
// Sanity check that it came up OK | |||
if (!node.hasIp4()) { | |||
progressMeter.error(METER_ERROR_NO_IP); | |||
final String message = "newNode: node booted but has no IP"; | |||
killNode(node, message); | |||
return die(message); | |||
} | |||
// Create DNS A and AAAA records for node | |||
final NodeDnsJob dnsJob = new NodeDnsJob(cloudDAO, domain, node, configuration); | |||
jobFutures.add(backgroundJobs.submit(dnsJob)); | |||
// Prepare ansible roles | |||
// We must wait until after server is started, because some roles require ip4 in vars | |||
node.waitForIpAddresses(); | |||
progressMeter.write(METER_TICK_PREPARING_ROLES); | |||
final Map<String, Object> ctx = ansiblePrep.prepAnsible( | |||
automation, bubbleFilesDir, account, network, node, computeDriver, | |||
@@ -245,11 +248,6 @@ public class StandardNetworkService implements NetworkService { | |||
throw new MultiViolationException(errors.getViolationBeans()); | |||
} | |||
// Create DNS A and AAAA records for node | |||
progressMeter.write(METER_TICK_WRITING_DNS_RECORDS); | |||
final CloudService dnsService = cloudDAO.findByUuid(domain.getPublicDns()); | |||
dnsService.getDnsDriver(configuration).setNode(node); | |||
progressMeter.write(METER_TICK_PREPARING_INSTALL); | |||
node.setState(BubbleNodeState.preparing_install); | |||
nodeDAO.update(node); | |||
@@ -288,21 +286,6 @@ public class StandardNetworkService implements NetworkService { | |||
final File installLocalScript = writeFile(automation, ctx, INSTALL_LOCAL_SH, INSTALL_LOCAL_TEMPLATE); | |||
chmod(installLocalScript, "500"); | |||
// ensure this hostname is visible in our DNS and in public DNS, | |||
// or else node can't create its own letsencrypt SSL cert | |||
progressMeter.write(METER_TICK_AWAITING_DNS); | |||
node.setState(BubbleNodeState.awaiting_dns); | |||
nodeDAO.update(node); | |||
// ensure it resolves authoritatively first, if anyone else asks about it, they might | |||
// cache the fact that it doesn't exist for a long time | |||
final DnsServiceDriver dnsDriver = dnsService.getDnsDriver(configuration); | |||
dnsDriver.ensureResolvable(domain, node, DNS_TIMEOUT); | |||
progressMeter.write(METER_TICK_STARTING_INSTALL); | |||
node.setState(BubbleNodeState.installing); | |||
nodeDAO.update(node); | |||
// run ansible | |||
final String sshArgs = "-o UserKnownHostsFile=/dev/null " | |||
+ "-o StrictHostKeyChecking=no " | |||
@@ -313,8 +296,25 @@ public class StandardNetworkService implements NetworkService { | |||
boolean setupOk = false; | |||
final String nodeUser = node.getUser(); | |||
final String script = getAnsibleSetupScript(automation, sshArgs, nodeUser, sshTarget); | |||
log.info("newNode: awaiting background jobs..."); | |||
final AwaitResult<Object> awaitResult = awaitAll(jobFutures, NODE_START_JOB_TIMEOUT); | |||
if (!awaitResult.allSucceeded()) { | |||
log.warn("newNode: some background jobs failed: "+awaitResult.getFailures().values()); | |||
final Exception firstException = awaitResult.getFailures().values().iterator().next(); | |||
if (firstException instanceof NodeJobException) { | |||
progressMeter.error(((NodeJobException) firstException).getMeterError()); | |||
} else { | |||
progressMeter.error(METER_START_OR_DNS_ERROR); | |||
} | |||
return die("newNode: error in setup:" + awaitResult.getFailures().values()); | |||
} | |||
waitForDebugger(script); | |||
progressMeter.write(METER_TICK_STARTING_INSTALL); | |||
node.setState(BubbleNodeState.installing); | |||
nodeDAO.update(node); | |||
log.info("newNode: running script:\n"+script); | |||
for (int i=0; i<MAX_ANSIBLE_TRIES; i++) { | |||
sleep((i+1) * SECONDS.toMillis(5), "waiting to try ansible setup"); | |||
@@ -352,7 +352,7 @@ public class StandardNetworkService implements NetworkService { | |||
if (node != null) { | |||
node.setState(BubbleNodeState.unknown_error); | |||
nodeDAO.update(node); | |||
progressMeter.error(METER_UNKNOWN_ERROR); | |||
if (!progressMeter.hasError()) progressMeter.error(METER_UNKNOWN_ERROR); | |||
killNode(node, "error: "+e); | |||
} else { | |||
final BubbleNetwork network = networkDAO.findByUuid(nn.getNetwork()); | |||
@@ -0,0 +1,49 @@ | |||
package bubble.service.cloud.job; | |||
import bubble.cloud.dns.DnsServiceDriver; | |||
import bubble.dao.cloud.CloudServiceDAO; | |||
import bubble.model.cloud.BubbleDomain; | |||
import bubble.model.cloud.BubbleNode; | |||
import bubble.model.cloud.CloudService; | |||
import bubble.server.BubbleConfiguration; | |||
import static bubble.service.cloud.NodeProgressMeterConstants.METER_ERROR_DNS; | |||
import static java.util.concurrent.TimeUnit.MINUTES; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.shortError; | |||
public class NodeDnsJob implements Runnable { | |||
private static final long DNS_TIMEOUT = MINUTES.toMillis(60); | |||
private CloudServiceDAO cloudDAO; | |||
private BubbleDomain domain; | |||
private BubbleNode node; | |||
private BubbleConfiguration configuration; | |||
public NodeDnsJob(CloudServiceDAO cloudDAO, | |||
BubbleDomain domain, | |||
BubbleNode node, | |||
BubbleConfiguration configuration) { | |||
this.cloudDAO = cloudDAO; | |||
this.domain = domain; | |||
this.node = node; | |||
this.configuration = configuration; | |||
} | |||
@Override public void run() { | |||
try { | |||
node.waitForIpAddresses(); | |||
final CloudService dnsService = cloudDAO.findByUuid(domain.getPublicDns()); | |||
dnsService.getDnsDriver(configuration).setNode(node); | |||
// ensure this hostname is visible in our DNS and in public DNS, | |||
// or else node can't create its own letsencrypt SSL cert | |||
// ensure it resolves authoritatively first, if anyone else asks about it, they might | |||
// cache the fact that it doesn't exist for a long time | |||
final DnsServiceDriver dnsDriver = dnsService.getDnsDriver(configuration); | |||
dnsDriver.ensureResolvable(domain, node, DNS_TIMEOUT); | |||
} catch (Exception e) { | |||
throw new NodeJobException(METER_ERROR_DNS, "run: error setting up DNS for node: "+shortError(e), e); | |||
} | |||
} | |||
} |
@@ -0,0 +1,19 @@ | |||
package bubble.service.cloud.job; | |||
import lombok.Getter; | |||
public class NodeJobException extends RuntimeException { | |||
@Getter private String meterError; | |||
public NodeJobException(String meterError, String message, Exception e) { | |||
super(message, e); | |||
this.meterError = meterError; | |||
} | |||
public NodeJobException(String meterError, String message) { | |||
super(message); | |||
this.meterError = meterError; | |||
} | |||
} |
@@ -0,0 +1,49 @@ | |||
package bubble.service.cloud.job; | |||
import bubble.cloud.compute.ComputeServiceDriver; | |||
import bubble.dao.cloud.BubbleNodeDAO; | |||
import bubble.model.cloud.BubbleNode; | |||
import bubble.model.cloud.BubbleNodeState; | |||
import bubble.service.cloud.StandardNetworkService; | |||
import static bubble.service.cloud.NodeProgressMeterConstants.METER_ERROR_NO_IP; | |||
import static bubble.service.cloud.NodeProgressMeterConstants.METER_ERROR_STARTING_NODE; | |||
import static org.cobbzilla.util.daemon.ZillaRuntime.shortError; | |||
public class NodeStartJob implements Runnable { | |||
private StandardNetworkService networkService; | |||
private BubbleNode node; | |||
private BubbleNodeDAO nodeDAO; | |||
private ComputeServiceDriver computeDriver; | |||
public NodeStartJob(StandardNetworkService networkService, | |||
BubbleNode node, | |||
BubbleNodeDAO nodeDAO, | |||
ComputeServiceDriver computeDriver) { | |||
this.networkService = networkService; | |||
this.node = node; | |||
this.nodeDAO = nodeDAO; | |||
this.computeDriver = computeDriver; | |||
} | |||
@Override public void run() { | |||
try { | |||
node.setState(BubbleNodeState.booting); | |||
nodeDAO.update(node); | |||
node = computeDriver.start(node); | |||
node.setState(BubbleNodeState.booted); | |||
nodeDAO.update(node); | |||
if (!node.hasIp4()) { | |||
throw new NodeJobException(METER_ERROR_NO_IP, "node booted but has no IP"); | |||
} | |||
} catch (NodeJobException e) { | |||
throw e; | |||
} catch (Exception e) { | |||
throw new NodeJobException(METER_ERROR_STARTING_NODE, "error starting node: "+shortError(e), e); | |||
} | |||
} | |||
} |
@@ -243,6 +243,7 @@ public class PackerJob implements Callable<List<PackerImage>> { | |||
} | |||
if (imagesRef != null) imagesRef.set(images); | |||
log.info("packer images created: "+images); | |||
return images; | |||
} | |||
@@ -1,58 +0,0 @@ | |||
#!/bin/bash | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
LOG=/tmp/bubble.algo_refresh_users.log | |||
function die { | |||
echo 1>&2 "${1}" | |||
log "${1}" | |||
exit 1 | |||
} | |||
function log { | |||
echo "$(date): ${1}" >> ${LOG} | |||
} | |||
ALGO_BASE=/root/ansible/roles/algo/algo | |||
if [[ ! -d ${ALGO_BASE} ]] ; then | |||
die "Algo VPN directory ${ALGO_BASE} not found" | |||
fi | |||
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD" | |||
if [[ ! -f "${CA_PASS_FILE}" ]] ; then | |||
die "No CA password file found: ${CA_PASS_FILE}" | |||
fi | |||
if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then | |||
die "No ${ALGO_BASE}/config.cfg.hbs found" | |||
fi | |||
log "Regenerating algo config..." | |||
java -cp /home/bubble/api/bubble.jar bubble.main.BubbleMain generate-algo-conf --algo-config ${ALGO_BASE}/config.cfg.hbs || die "Error writing algo config.cfg" | |||
log "Updating algo VPN users..." | |||
cd ${ALGO_BASE} && \ | |||
python3 -m virtualenv --python="$(command -v python3)" .env \ | |||
&& source .env/bin/activate \ | |||
&& python3 -m pip install -U pip virtualenv \ | |||
&& python3 -m pip install -r requirements.txt \ | |||
&& ansible-playbook users.yml --tags update-users --skip-tags debug \ | |||
-e "ca_password=$(cat ${CA_PASS_FILE}) | |||
provider=local | |||
server=localhost | |||
store_cakey=true | |||
ondemand_cellular=false | |||
ondemand_wifi=false | |||
store_pki=true | |||
dns_adblocking=false | |||
ssh_tunneling=false | |||
endpoint={{ endpoint }} | |||
server_name={{ server_name }}" 2>&1 | tee -a ${LOG} || die "Error running algo users.yml" | |||
# Archive configs in a place that the BackupService can pick them up | |||
log "Sync'ing algo VPN users to bubble..." | |||
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz | |||
cd ${ALGO_BASE} && tar czf ${CONFIGS_BACKUP} configs && chgrp bubble ${CONFIGS_BACKUP} && chmod 660 ${CONFIGS_BACKUP} || die "Error backing up algo configs" | |||
cd /home/bubble && rm -rf configs/* && tar xzf ${CONFIGS_BACKUP} && chgrp -R bubble configs && chown -R bubble configs && chmod 500 configs || die "Error unpacking algo configs to bubble home" | |||
log "VPN users successfully sync'd to bubble" |
@@ -1,49 +0,0 @@ | |||
#!/bin/bash | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
LOG=/tmp/bubble.algo_refresh_users_monitor.log | |||
function die { | |||
echo 1>&2 "${1}" | |||
log "${1}" | |||
exit 1 | |||
} | |||
function log { | |||
echo "$(date): ${1}" >> ${LOG} | |||
} | |||
ALGO_BASE=/root/ansible/roles/algo/algo | |||
if [[ ! -d ${ALGO_BASE} ]] ; then | |||
die "Algo VPN directory ${ALGO_BASE} not found" | |||
fi | |||
CA_PASS_FILE="/home/bubble/.BUBBLE_ALGO_CA_KEY_PASSWORD" | |||
if [[ ! -f "${CA_PASS_FILE}" ]] ; then | |||
die "No CA password file found: ${CA_PASS_FILE}" | |||
fi | |||
if [[ ! -f "${ALGO_BASE}/config.cfg.hbs" ]] ; then | |||
die "No ${ALGO_BASE}/config.cfg.hbs found" | |||
fi | |||
BUBBLE_USER_MARKER=/home/bubble/.algo_refresh_users | |||
ALGO_USER_MARKER=${ALGO_BASE}/.algo_refresh_users | |||
if [[ ! -f ${BUBBLE_USER_MARKER} ]] ; then | |||
touch ${BUBBLE_USER_MARKER} && chown bubble ${BUBBLE_USER_MARKER} | |||
fi | |||
if [[ ! -f ${ALGO_USER_MARKER} ]] ; then | |||
touch ${ALGO_USER_MARKER} | |||
fi | |||
log "Watching marker file..." | |||
while : ; do | |||
if [[ $(stat -c %Y ${BUBBLE_USER_MARKER}) -gt $(stat -c %Y ${ALGO_USER_MARKER}) ]] ; then | |||
touch ${ALGO_USER_MARKER} | |||
sleep 5s | |||
log "Refreshing VPN users..." | |||
/usr/local/bin/algo_refresh_users.sh && log "VPN users successfully refreshed" || log "Error refreshing Algo VPN users" | |||
fi | |||
sleep 10s | |||
done |
@@ -1,187 +0,0 @@ | |||
# This is the list of users to generate. | |||
# Every device must have a unique username. | |||
# You can generate up to 250 users at one time. | |||
# Usernames with leading 0's or containing only numbers should be escaped in double quotes, e.g. "000dan" or "123". | |||
users: | |||
<<#each bubbleUsers>> - "<<this>>" | |||
<</each>> | |||
### Advanced users only below this line ### | |||
# Store the PKI in a ram disk. Enabled only if store_pki (retain the PKI) is set to false | |||
# Supports on MacOS and Linux only (including Windows Subsystem for Linux) | |||
pki_in_tmpfs: true | |||
# If True re-init all existing certificates. Boolean | |||
keys_clean_all: False | |||
# Deploy StrongSwan to enable IPsec support | |||
ipsec_enabled: true | |||
# StrongSwan log level | |||
# https://wiki.strongswan.org/projects/strongswan/wiki/LoggerConfiguration | |||
strongswan_log_level: 2 | |||
# rightsourceip for ipsec | |||
# ipv4 | |||
strongswan_network: 10.19.48.0/24 | |||
# ipv6 | |||
strongswan_network_ipv6: 'fd9d:bc11:4020::/48' | |||
# Deploy WireGuard | |||
# WireGuard will listen on 51820/UDP. You might need to change to another port | |||
# if your network blocks this one. Be aware that 53/UDP (DNS) is blocked on some | |||
# mobile data networks. | |||
wireguard_enabled: true | |||
wireguard_port: 51820 | |||
# If you're behind NAT or a firewall and you want to receive incoming connections long after network traffic has gone silent. | |||
# This option will keep the "connection" open in the eyes of NAT. | |||
# See: https://www.wireguard.com/quickstart/#nat-and-firewall-traversal-persistence | |||
wireguard_PersistentKeepalive: 0 | |||
# WireGuard network configuration | |||
wireguard_network_ipv4: 10.19.49.0/24 | |||
wireguard_network_ipv6: fd9d:bc11:4021::/48 | |||
# Reduce the MTU of the VPN tunnel | |||
# Some cloud and internet providers use a smaller MTU (Maximum Transmission | |||
# Unit) than the normal value of 1500 and if you don't reduce the MTU of your | |||
# VPN tunnel some network connections will hang. Algo will attempt to set this | |||
# automatically based on your server, but if connections hang you might need to | |||
# adjust this yourself. | |||
# See: https://github.com/trailofbits/algo/blob/master/docs/troubleshooting.md#various-websites-appear-to-be-offline-through-the-vpn | |||
reduce_mtu: 0 | |||
# Algo will use the following lists to block ads. You can add new block lists | |||
# after deployment by modifying the line starting "BLOCKLIST_URLS=" at: | |||
# /usr/local/sbin/adblock.sh | |||
# If you load very large blocklists, you may also have to modify resource limits: | |||
# /etc/systemd/system/dnsmasq.service.d/100-CustomLimitations.conf | |||
adblock_lists: | |||
- "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" | |||
- "https://hosts-file.net/ad_servers.txt" | |||
# Enable DNS encryption. | |||
# If 'false', 'dns_servers' should be specified below. | |||
# DNS encryption can not be disabled if DNS adblocking is enabled | |||
dns_encryption: true | |||
# DNS servers which will be used if 'dns_encryption' is 'true'. Multiple | |||
# providers may be specified, but avoid mixing providers that filter results | |||
# (like Cisco) with those that don't (like Cloudflare) or you could get | |||
# inconsistent results. The list of available public providers can be found | |||
# here: | |||
# https://github.com/DNSCrypt/dnscrypt-resolvers/blob/master/v2/public-resolvers.md | |||
dnscrypt_servers: | |||
ipv4: | |||
- cloudflare | |||
ipv6: | |||
- cloudflare-ipv6 | |||
# DNS servers which will be used if 'dns_encryption' is 'false'. | |||
# The default is to use Cloudflare. | |||
dns_servers: | |||
ipv4: | |||
- 1.1.1.1 | |||
- 1.0.0.1 | |||
ipv6: | |||
- 2606:4700:4700::1111 | |||
- 2606:4700:4700::1001 | |||
# Randomly generated IP address for the local dns resolver | |||
local_service_ip: "{{ '172.16.0.1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
local_service_ipv6: "{{ 'fd00::1' | ipmath(1048573 | random(seed=algo_server_name + ansible_fqdn)) }}" | |||
# Your Algo server will automatically install security updates. Some updates | |||
# require a reboot to take effect but your Algo server will not reboot itself | |||
# automatically unless you change 'enabled' below from 'false' to 'true', in | |||
# which case a reboot will take place if necessary at the time specified (as | |||
# HH:MM) in the time zone of your Algo server. The default time zone is UTC. | |||
unattended_reboot: | |||
enabled: false | |||
time: 06:00 | |||
# Block traffic between connected clients | |||
BetweenClients_DROP: true | |||
# Block SMB/CIFS traffic | |||
block_smb: true | |||
# Block NETBIOS traffic | |||
block_netbios: true | |||
congrats: | |||
common: | | |||
"# Congratulations! #" | |||
"# Your Algo server is running. #" | |||
"# Config files and certificates are in the ./configs/ directory. #" | |||
"# Go to https://whoer.net/ after connecting #" | |||
"# and ensure that all your traffic passes through the VPN. #" | |||
"# Local DNS resolver {{ local_service_ip }}{{ ', ' + local_service_ipv6 if ipv6_support else '' }} #" | |||
p12_pass: | | |||
"# The p12 and SSH keys password for new users is {{ p12_export_password }} #" | |||
ca_key_pass: | | |||
"# The CA key password is {{ CA_password|default(omit) }} #" | |||
ssh_access: | | |||
"# Shell access: ssh -i {{ ansible_ssh_private_key_file|default(omit) }} {{ ansible_ssh_user|default(omit) }}@{{ ansible_ssh_host|default(omit) }} #" | |||
SSH_keys: | |||
comment: algo@ssh | |||
private: configs/algo.pem | |||
private_tmp: /tmp/algo-ssh.pem | |||
public: configs/algo.pem.pub | |||
cloud_providers: | |||
azure: | |||
size: Standard_B1S | |||
image: 19.04 | |||
digitalocean: | |||
size: s-1vcpu-1gb | |||
image: "ubuntu-19-04-x64" | |||
ec2: | |||
# Change the encrypted flag to "true" to enable AWS volume encryption, for encryption of data at rest. | |||
encrypted: true | |||
# Set use_existing_eip to "true" if you want to use a pre-allocated Elastic IP | |||
# Additional prompt will be raised to determine which IP to use | |||
use_existing_eip: false | |||
size: t2.micro | |||
image: | |||
name: "ubuntu-disco-19.04" | |||
owner: "099720109477" | |||
gce: | |||
size: f1-micro | |||
image: ubuntu-1904 | |||
external_static_ip: false | |||
lightsail: | |||
size: nano_1_0 | |||
image: ubuntu_18_04 | |||
scaleway: | |||
size: DEV1-S | |||
image: Ubuntu Bionic Beaver | |||
arch: x86_64 | |||
hetzner: | |||
server_type: cx11 | |||
image: ubuntu-18.04 | |||
openstack: | |||
flavor_ram: ">=512" | |||
image: Ubuntu-18.04 | |||
cloudstack: | |||
size: Micro | |||
image: Linux Ubuntu 19.04 64-bit | |||
disk: 10 | |||
vultr: | |||
os: Ubuntu 19.04 x64 | |||
size: 1024 MB RAM,25 GB SSD,1.00 TB BW | |||
local: | |||
fail_hint: | |||
- Sorry, but something went wrong! | |||
- Please check the troubleshooting guide. | |||
- https://trailofbits.github.io/algo/troubleshooting.html | |||
booleans_map: | |||
Y: true | |||
y: true |
@@ -1,5 +0,0 @@ | |||
[program:algo_refresh_users_monitor] | |||
stdout_logfile = /dev/null | |||
stderr_logfile = /dev/null | |||
command=/usr/local/bin/algo_refresh_users_monitor.sh |
@@ -1,5 +0,0 @@ | |||
[program:wg_monitor_connections] | |||
stdout_logfile = /dev/null | |||
stderr_logfile = /dev/null | |||
command=/usr/local/sbin/wg_monitor_connections.sh |
@@ -1,66 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Polls `wg show all` every 30 seconds and records, under BUBBLE_DEVICE_DIR,
# the mapping between WireGuard peers' device UUIDs (looked up in algo's
# wireguard config dir by public key) and their allowed IP addresses:
#   ip_<addr>      -> contains the device UUID for that IP
#   device_<uuid>  -> accumulates all IPs seen for that device
LOG=/tmp/bubble.wg_monitor_connections.log
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
function log {
  echo "$(date): ${1}" >> ${LOG}
}
ALGO_CONFIGS=/root/ansible/roles/algo/algo/configs
BUBBLE_DEVICE_DIR=/home/bubble/wg_devices
if [[ ! -d ${BUBBLE_DEVICE_DIR} ]] ; then
  mkdir -p ${BUBBLE_DEVICE_DIR} && chown -R bubble ${BUBBLE_DEVICE_DIR} && chmod 700 ${BUBBLE_DEVICE_DIR} || die "Error creating ${BUBBLE_DEVICE_DIR}"
fi
while : ; do
  peer=""
  IFS=$'\n'
  for line in $(wg show all) ; do
    if [[ ! -z "${peer}" ]] ; then
      # the line after a "peer:" line that starts with "allowed..." lists the peer's IPs
      if [[ $(echo "${line}" | tr -d ' ') == allowed* ]] ; then
        for ip in $(echo "${line}" | cut -d: -f2- | tr ',' '\n' | tr -d ' ' | cut -d/ -f1) ; do
          # find the device UUID whose public-key file contains this peer's key
          device_uuids="$(find $(find $(find ${ALGO_CONFIGS} -type d -name wireguard) -type d -name public) -type f | xargs grep -l ${peer} | xargs -n 1 basename)"
          if [[ $(echo "${device_uuids}" | wc -l | tr -d ' ') -gt 1 ]] ; then
            log "Multiple device UUIDs found for IP ${ip} (not recording anything): ${device_uuids}"
            continue
          fi
          device="$(echo "${device_uuids}" | head -1 | tr -d ' ')"
          ip_file="${BUBBLE_DEVICE_DIR}/ip_$(echo ${ip})"
          if [[ ! -f ${ip_file} ]] ; then
            touch ${ip_file} && chown bubble ${ip_file} && chmod 400 ${ip_file} || log "Error creating ${ip_file}"
          fi
          # fix: the ip_ file stores the device UUID, so check for the device,
          # not the IP (previous grep for ${ip} always missed and rewrote the file)
          device_exists=$(grep -c "${device}" ${ip_file})
          if [[ ${device_exists} -eq 0 ]] ; then
            log "recorded device ${device} for IP ${ip}"
            echo "${device}" > ${ip_file} || log "Error writing ${device} to ${ip_file}"
          fi
          device_file="${BUBBLE_DEVICE_DIR}/device_$(echo ${device})"
          if [[ ! -f ${device_file} ]] ; then
            # fix: previous failure message named ${ip_file} instead of ${device_file}
            touch ${device_file} && chown bubble ${device_file} && chmod 400 ${device_file} || log "Error creating ${device_file}"
          fi
          ip_exists=$(grep -c "${ip}" ${device_file})
          if [[ ${ip_exists} -eq 0 ]] ; then
            log "recorded IP ${ip} for device ${device}"
            echo "${ip}" >> ${device_file} || log "Error writing ${ip} to ${device_file}"
          fi
        done
        peer=""
      fi
    elif [[ ${line} == peer* ]] ; then
      peer="$(echo "${line}" | awk '{print $NF}')"
    fi
  done
  sleep 30s
done
@@ -21,8 +21,8 @@ | |||
- { file: '.BUBBLE_DB_ENCRYPTION_KEY', group: postgres } # postgres user needs access to DB key | |||
- { file: '.BUBBLE_PG_PASSWORD', group: postgres } # postgres user needs access to DB password | |||
- name: Write DB key | |||
shell: echo -n "{{ db_key }}" > /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY | |||
- name: Write source DB key if database does not exist | |||
shell: su - postgres bash -c "init_bubble_db.sh {{ db_name }} {{ db_user }} INIT {{ db_key }}" | |||
- name: Write bubble env file | |||
template: | |||
@@ -1,129 +0,0 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import requests | |||
import traceback | |||
import sys | |||
import os | |||
import time | |||
import uuid | |||
import datetime | |||
import redis | |||
import json | |||
from bubble_config import bubble_network, bubble_port | |||
# Write python PID to file so that mitmdump_monitor.sh can check for excessive memory usage and restart if needed
MITMDUMP_PID_FILE_PATH = '/home/mitmproxy/mitmdump.pid'
MITMDUMP_PID_FILE = open(MITMDUMP_PID_FILE_PATH, "w")
MITMDUMP_PID_FILE.write("%d" % os.getpid())
MITMDUMP_PID_FILE.close()
# request header names inspected when building matcher requests
HEADER_USER_AGENT = 'User-Agent'
HEADER_REFERER = 'Referer'
# keys for per-flow context values stored/read via add_flow_ctx/get_flow_ctx
CTX_BUBBLE_MATCHERS='X-Bubble-Matchers'
CTX_BUBBLE_ABORT='X-Bubble-Abort'
CTX_BUBBLE_PASSTHRU='X-Bubble-Passthru'
CTX_BUBBLE_REQUEST_ID='X-Bubble-RequestId'
CTX_CONTENT_LENGTH='X-Bubble-Content-Length'
CTX_CONTENT_LENGTH_SENT='X-Bubble-Content-Length-Sent'
# requests whose path starts with this prefix are routed to the local bubble API
BUBBLE_URI_PREFIX='/__bubble/'
# local redis instance, shared with the bubble server process
REDIS = redis.Redis(host='127.0.0.1', port=6379, db=0)
# activity-log keys use this prefix and expire after 600 seconds
BUBBLE_ACTIVITY_LOG_PREFIX = 'bubble_activity_log_'
BUBBLE_ACTIVITY_LOG_EXPIRATION = 600
def redis_set(name, value, ex):
    """Set name=value with an expiration, whether or not the key exists.

    redis SET cannot express "create or update, always refreshing TTL" in one
    call here, so issue both: nx=True covers the key-missing case and xx=True
    the key-present case; exactly one of the two takes effect.
    """
    REDIS.set(name, value, ex=ex, nx=True)
    REDIS.set(name, value, ex=ex, xx=True)
def bubble_log(message):
    """Write a time-stamped message to stderr."""
    now = datetime.datetime.now()
    stamp = str(datetime.datetime.time(now))
    print(stamp + ': ' + message, file=sys.stderr)
def bubble_activity_log(client_addr, server_addr, event, data):
    """Record one activity event in redis under a unique, time-prefixed key.

    The entry expires after BUBBLE_ACTIVITY_LOG_EXPIRATION seconds.
    """
    entry = {
        'source': 'mitmproxy',
        'client_addr': client_addr,
        'server_addr': server_addr,
        'event': event,
        'data': data
    }
    key = BUBBLE_ACTIVITY_LOG_PREFIX + str(time.time() * 1000.0) + '_' + str(uuid.uuid4())
    value = json.dumps(entry)
    bubble_log('bubble_activity_log: setting '+key+' = '+value)
    redis_set(key, value, BUBBLE_ACTIVITY_LOG_EXPIRATION)
def bubble_passthru(remote_addr, addr, fqdn):
    """Ask the local bubble API whether traffic for addr/fqdn should be passed through.

    Returns True when the API responds with a 2xx status, False on any
    non-2xx response or on any exception (logged).
    """
    request_headers = {
        'X-Forwarded-For': remote_addr,
        'Accept' : 'application/json',
        'Content-Type': 'application/json'
    }
    payload = {
        'addr': str(addr),
        'fqdn': str(fqdn),
        'remoteAddr': remote_addr
    }
    try:
        resp = requests.post('http://127.0.0.1:'+bubble_port+'/api/filter/passthru',
                             headers=request_headers, json=payload)
        return resp.ok
    except Exception as e:
        bubble_log('bubble_passthru API call failed: '+repr(e))
        traceback.print_exc()
        return False
def bubble_matchers(req_id, remote_addr, flow, host):
    """Ask the bubble API which matcher rules apply to one HTTP request.

    Returns the decoded JSON response dict, or None when the call fails
    or the API answers with a non-2xx status (both logged).
    """
    request_headers = {
        'X-Forwarded-For': remote_addr,
        'Accept' : 'application/json',
        'Content-Type': 'application/json'
    }
    if HEADER_USER_AGENT in flow.request.headers:
        user_agent = flow.request.headers[HEADER_USER_AGENT]
    else:
        bubble_log('bubble_matchers: no User-Agent header, setting to UNKNOWN')
        user_agent = 'UNKNOWN'
    if HEADER_REFERER in flow.request.headers:
        try:
            # round-trip through bytes to surface any encoding problems early
            referer = flow.request.headers[HEADER_REFERER].encode().decode()
        except Exception as e:
            bubble_log('bubble_matchers: error parsing Referer header: '+repr(e))
            referer = 'NONE'
    else:
        bubble_log('bubble_matchers: no Referer header, setting to NONE')
        referer = 'NONE'
    try:
        payload = {
            'requestId': req_id,
            'fqdn': host,
            'uri': flow.request.path,
            'userAgent': user_agent,
            'referer': referer,
            'remoteAddr': remote_addr
        }
        resp = requests.post('http://127.0.0.1:'+bubble_port+'/api/filter/matchers/'+req_id,
                             headers=request_headers, json=payload)
        if resp.ok:
            return resp.json()
        bubble_log('bubble_matchers response not OK, returning empty matchers array: '+str(resp.status_code)+' / '+repr(resp.text))
    except Exception as e:
        bubble_log('bubble_matchers API call failed: '+repr(e))
        traceback.print_exc()
    return None
def add_flow_ctx(flow, name, value):
    """Store a named value on the flow's per-request bubble context dict."""
    try:
        ctx = flow.bubble_ctx
    except AttributeError:
        ctx = flow.bubble_ctx = {}
    ctx[name] = value
def get_flow_ctx(flow, name):
    """Return the named value from the flow's bubble context, or None if absent.

    Robust to the context dict being missing entirely or set to None; uses
    'name not in' (idiomatic) instead of 'not name in'.
    """
    ctx = getattr(flow, 'bubble_ctx', None)
    if ctx is None or name not in ctx:
        return None
    return ctx[name]
@@ -1,178 +0,0 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import re | |||
import requests | |||
import urllib | |||
import traceback | |||
from mitmproxy.net.http import Headers | |||
from bubble_config import bubble_port, bubble_host_alias | |||
from bubble_api import CTX_BUBBLE_MATCHERS, CTX_BUBBLE_ABORT, BUBBLE_URI_PREFIX, \ | |||
CTX_BUBBLE_REQUEST_ID, CTX_CONTENT_LENGTH, CTX_CONTENT_LENGTH_SENT, bubble_log, get_flow_ctx, add_flow_ctx | |||
# NOTE(review): BUFFER_SIZE appears unused in this module — confirm before removing
BUFFER_SIZE = 4096
# standard HTTP header names inspected/rewritten below
HEADER_CONTENT_TYPE = 'Content-Type'
HEADER_CONTENT_LENGTH = 'Content-Length'
HEADER_CONTENT_ENCODING = 'Content-Encoding'
HEADER_TRANSFER_ENCODING = 'Transfer-Encoding'
# chunks are POSTed to the filter API as raw bytes
BINARY_DATA_HEADER = {HEADER_CONTENT_TYPE: 'application/octet-stream'}
def filter_chunk(chunk, req_id, last, content_encoding=None, content_type=None, content_length=None):
    """POST one chunk of response data to the bubble filter API; return filtered bytes.

    Content-describing query params are only sent when both chunk and
    content_type are present (i.e. on the first chunk); last=true marks the
    final chunk so the server can flush its buffers.
    Returns b'' when the API call fails (logged).
    """
    url = 'http://127.0.0.1:' + bubble_port + '/api/filter/apply/' + req_id
    query = ''
    if chunk and content_type:
        query = '?type=' + urllib.parse.quote_plus(content_type)
        if content_encoding:
            query += '&encoding=' + urllib.parse.quote_plus(content_encoding)
        if content_length:
            query += '&length=' + str(content_length)
    if last:
        query += ('&' if query else '?') + 'last=true'
    url = url + query
    bubble_log('filter_chunk: url='+url)
    response = requests.post(url, data=chunk, headers=BINARY_DATA_HEADER)
    if not response.ok:
        bubble_log('filter_chunk: Error fetching ' + url + ', HTTP status ' + str(response.status_code))
        return b''
    return response.content
def bubble_filter_chunks(flow, chunks, req_id, content_encoding, content_type):
    """Generator: stream each response chunk through the bubble filter API.

    chunks is a generator over the raw response body. When Content-Length is
    known, running byte counts (stored in the flow context) determine which
    chunk is last; otherwise one extra empty last=True call flushes the
    server side. Yields None if anything goes wrong, aborting the stream.
    """
    content_length = get_flow_ctx(flow, CTX_CONTENT_LENGTH)
    first = True
    try:
        for chunk in chunks:
            if content_length:
                sent_so_far = get_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT)
                size = len(chunk)
                last = size + sent_so_far >= content_length
                bubble_log('bubble_filter_chunks: content_length = '+str(content_length)+', bytes_sent = '+str(sent_so_far))
                add_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT, sent_so_far + size)
            else:
                last = False
            if first:
                first = False
                # only the first chunk carries the content metadata
                yield filter_chunk(chunk, req_id, last, content_encoding, content_type, content_length)
            else:
                yield filter_chunk(chunk, req_id, last)
        if not content_length:
            yield filter_chunk(None, req_id, True)  # get the last bits of data
    except Exception as e:
        bubble_log('bubble_filter_chunks: exception='+repr(e))
        traceback.print_exc()
        yield None
def bubble_modify(flow, req_id, content_encoding, content_type):
    """Return a callable usable as flow.response.stream that filters body chunks."""
    def stream_filter(chunks):
        return bubble_filter_chunks(flow, chunks, req_id, content_encoding, content_type)
    return stream_filter
def send_bubble_response(response):
    """Generator yielding the bubble API response body in 8KB chunks."""
    yield from response.iter_content(8192)
def responseheaders(flow):
    """mitmproxy hook: runs when response headers arrive, before the body.

    Three paths:
      1. Requests under BUBBLE_URI_PREFIX are proxied to the local bubble API
         and the upstream response is replaced entirely.
      2. Flows flagged with an abort code (CTX_BUBBLE_ABORT) are answered
         with that status code and an empty body.
      3. Flows carrying matchers (CTX_BUBBLE_MATCHERS) whose Content-Type
         matches a matcher's contentTypeRegex get their body streamed
         through the bubble filter API; everything else passes through.
    """
    if flow.request.path and flow.request.path.startswith(BUBBLE_URI_PREFIX):
        # special bubble request: forward to the local API, replace the response
        uri = 'http://127.0.0.1:' + bubble_port + '/' + flow.request.path[len(BUBBLE_URI_PREFIX):]
        bubble_log('responseheaders: sending special bubble request to '+uri)
        headers = {
            'Accept' : 'application/json',
            'Content-Type': 'application/json'
        }
        response = None
        if flow.request.method == 'GET':
            response = requests.get(uri, headers=headers, stream=True)
        elif flow.request.method == 'POST':
            bubble_log('responseheaders: special bubble request: POST content is '+str(flow.request.content))
            headers['Content-Length'] = str(len(flow.request.content))
            response = requests.post(uri, data=flow.request.content, headers=headers)
        else:
            bubble_log('responseheaders: special bubble request: method '+flow.request.method+' not supported')
        if response is not None:
            bubble_log('responseheaders: special bubble request: response status = '+str(response.status_code))
            flow.response.headers = Headers()
            for key, value in response.headers.items():
                flow.response.headers[key] = value
            flow.response.status_code = response.status_code
            flow.response.stream = lambda chunks: send_bubble_response(response)
    else:
        abort_code = get_flow_ctx(flow, CTX_BUBBLE_ABORT)
        if abort_code is not None:
            # an earlier hook decided to abort this flow with a specific status
            bubble_log('responseheaders: aborting request with HTTP status '+str(abort_code))
            flow.response.headers = Headers()
            flow.response.status_code = abort_code
            flow.response.stream = lambda chunks: []
        else:
            req_id = get_flow_ctx(flow, CTX_BUBBLE_REQUEST_ID)
            matchers = get_flow_ctx(flow, CTX_BUBBLE_MATCHERS)
            if req_id is not None and matchers is not None:
                bubble_log('responseheaders: req_id='+req_id+' with matchers: '+repr(matchers))
                if HEADER_CONTENT_TYPE in flow.response.headers:
                    content_type = flow.response.headers[HEADER_CONTENT_TYPE]
                    if matchers:
                        # only filter when at least one matcher's contentTypeRegex
                        # (default '^text/html.*') matches the response Content-Type
                        any_content_type_matches = False
                        for m in matchers:
                            if 'contentTypeRegex' in m:
                                typeRegex = m['contentTypeRegex']
                                if typeRegex is None:
                                    typeRegex = '^text/html.*'
                                if re.match(typeRegex, content_type):
                                    any_content_type_matches = True
                                    bubble_log('responseheaders: req_id='+req_id+' found at least one matcher for content_type ('+content_type+'), filtering')
                                    break
                        if not any_content_type_matches:
                            bubble_log('responseheaders: req_id='+req_id+' no matchers for content_type ('+content_type+'), passing thru')
                            return
                        if HEADER_CONTENT_ENCODING in flow.response.headers:
                            content_encoding = flow.response.headers[HEADER_CONTENT_ENCODING]
                        else:
                            content_encoding = None
                        # drop Content-Length: filtering changes the body size
                        content_length_value = flow.response.headers.pop(HEADER_CONTENT_LENGTH, None)
                        bubble_log('responseheaders: req_id='+req_id+' content_encoding='+repr(content_encoding) + ', content_type='+repr(content_type))
                        flow.response.stream = bubble_modify(flow, req_id, content_encoding, content_type)
                        if content_length_value:
                            flow.response.headers['transfer-encoding'] = 'chunked'
                            # find server_conn to set fake_chunks on
                            if flow.live and flow.live.ctx:
                                ctx = flow.live.ctx
                                # walk the nested layer contexts until one exposes server_conn
                                while not hasattr(ctx, 'server_conn'):
                                    if hasattr(ctx, 'ctx'):
                                        ctx = ctx.ctx
                                    else:
                                        bubble_log('responseheaders: error finding server_conn. last ctx has no further ctx. type='+str(type(ctx))+' vars='+str(vars(ctx)))
                                        return
                                if not hasattr(ctx, 'server_conn'):
                                    bubble_log('responseheaders: error finding server_conn. ctx type='+str(type(ctx))+' vars='+str(vars(ctx)))
                                    return
                                content_length = int(content_length_value)
                                ctx.server_conn.rfile.fake_chunks = content_length
                                add_flow_ctx(flow, CTX_CONTENT_LENGTH, content_length)
                                add_flow_ctx(flow, CTX_CONTENT_LENGTH_SENT, 0)
                    else:
                        bubble_log('responseheaders: no matchers, passing thru')
                        pass
                else:
                    bubble_log('responseheaders: no '+HEADER_CONTENT_TYPE +' header, passing thru')
                    pass
            else:
                bubble_log('responseheaders: no '+CTX_BUBBLE_MATCHERS +' in ctx, passing thru')
                pass
@@ -1,112 +0,0 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Parts of this are borrowed from tls_passthrough.py in the mitmproxy project. The mitmproxy license is reprinted here: | |||
# | |||
# Copyright (c) 2013, Aldo Cortesi. All rights reserved. | |||
# | |||
# Permission is hereby granted, free of charge, to any person obtaining a copy | |||
# of this software and associated documentation files (the "Software"), to deal | |||
# in the Software without restriction, including without limitation the rights | |||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |||
# copies of the Software, and to permit persons to whom the Software is | |||
# furnished to do so, subject to the following conditions: | |||
# | |||
# The above copyright notice and this permission notice shall be included in | |||
# all copies or substantial portions of the Software. | |||
# | |||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | |||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |||
# SOFTWARE. | |||
# | |||
from mitmproxy.proxy.protocol import TlsLayer, RawTCPLayer | |||
from mitmproxy.exceptions import TlsProtocolException | |||
from bubble_api import bubble_log, bubble_passthru, bubble_activity_log, redis_set | |||
import redis | |||
import json | |||
# key prefixes shared with the bubble DNS layer and this module's decision cache
REDIS_DNS_PREFIX = 'bubble_dns_'
REDIS_PASSTHRU_PREFIX = 'bubble_passthru_'
REDIS_PASSTHRU_DURATION = 60 * 60 # 1 hour timeout on passthru
# local redis instance used for DNS lookups and passthru caching
REDIS = redis.Redis(host='127.0.0.1', port=6379, db=0)
def passthru_cache_prefix(client_addr, server_addr):
    """Redis key caching the passthru decision for one client/server pair."""
    return '{}{}_{}'.format(REDIS_PASSTHRU_PREFIX, client_addr, server_addr)
class TlsFeedback(TlsLayer):
    """
    Monkey-patch _establish_tls_with_client to get feedback if TLS could be established
    successfully on the client connection (which may fail due to cert pinning).
    """
    def _establish_tls_with_client(self):
        # On TLS failure (e.g. cert pinning), cache a passthru=True decision for
        # this client/server pair so future connections skip interception.
        client_address = self.client_conn.address[0]
        server_address = self.server_conn.address[0]
        try:
            super(TlsFeedback, self)._establish_tls_with_client()
        except TlsProtocolException as e:
            bubble_log('_establish_tls_with_client: TLS error for '+repr(server_address)+', enabling passthru')
            cache_key = passthru_cache_prefix(client_address, server_address)
            fqdn = fqdn_for_addr(server_address)
            redis_set(cache_key, json.dumps({'fqdn': fqdn, 'addr': server_address, 'passthru': True}), ex=REDIS_PASSTHRU_DURATION)
            # re-raise so mitmproxy still handles the failed handshake
            raise e
def fqdn_for_addr(addr):
    """Look up the FQDN recorded for an IP address by the bubble DNS layer.

    Returns '' when no mapping exists in redis.
    """
    fqdn = REDIS.get(REDIS_DNS_PREFIX + addr)
    if fqdn is None or len(fqdn) == 0:
        # fixed: log message previously claimed to come from check_bubble_passthru
        bubble_log('fqdn_for_addr: no FQDN found for addr '+repr(addr)+', checking raw addr')
        fqdn = b''
    return fqdn.decode()
def check_bubble_passthru(remote_addr, addr, fqdn):
    """Ask the bubble API for a passthru decision and wrap it in a dict.

    Returns {'fqdn': ..., 'addr': ..., 'passthru': bool}.
    """
    if bubble_passthru(remote_addr, addr, fqdn):
        bubble_log('check_bubble_passthru: bubble_passthru returned True for FQDN/addr '+repr(fqdn)+'/'+repr(addr)+', returning True')
        return {'fqdn': fqdn, 'addr': addr, 'passthru': True}
    bubble_log('check_bubble_passthru: bubble_passthru returned False for FQDN/addr '+repr(fqdn)+'/'+repr(addr)+', returning False')
    return {'fqdn': fqdn, 'addr': addr, 'passthru': False}
def should_passthru(remote_addr, addr):
    """Decide whether TLS for (remote_addr, addr) should be passed through.

    Consults the redis cache first; on a miss, resolves the FQDN for addr
    and asks the bubble API, caching the result for REDIS_PASSTHRU_DURATION.
    Returns a dict: {'fqdn': ..., 'addr': ..., 'passthru': bool}.
    """
    prefix = 'should_passthru: '+repr(addr)+' '
    bubble_log(prefix+'starting...')
    cache_key = passthru_cache_prefix(remote_addr, addr)
    passthru_json = REDIS.get(cache_key)
    if passthru_json is None or len(passthru_json) == 0:
        bubble_log(prefix+' not in redis or empty, calling check_bubble_passthru...')
        fqdn = fqdn_for_addr(addr)
        if fqdn is None or len(fqdn) == 0:
            # no DNS record for this address: do not intercept, and do not cache
            bubble_log(prefix+' no fqdn found for addr '+addr+', returning (uncached) passthru = False')
            return {'fqdn': None, 'addr': addr, 'passthru': False}
        passthru = check_bubble_passthru(remote_addr, addr, fqdn)
        bubble_log(prefix+'check_bubble_passthru returned '+repr(passthru)+", storing in redis...")
        redis_set(cache_key, json.dumps(passthru), ex=REDIS_PASSTHRU_DURATION)
    else:
        bubble_log('found passthru_json='+str(passthru_json)+', touching key in redis')
        passthru = json.loads(passthru_json)
        # NOTE(review): REDIS.touch updates last-access time, not the TTL —
        # confirm whether refreshing the expiration was intended here
        REDIS.touch(cache_key)
    bubble_log(prefix+'returning '+repr(passthru))
    return passthru
def next_layer(next_layer):
    # mitmproxy hook: for client-TLS layers, either replace the layer with a raw
    # TCP passthru (no interception) or swap in TlsFeedback so handshake
    # failures can enable passthru for future connections.
    if isinstance(next_layer, TlsLayer) and next_layer._client_tls:
        client_address = next_layer.client_conn.address[0]
        server_address = next_layer.server_conn.address[0]
        passthru = should_passthru(client_address, server_address)
        if passthru['passthru']:
            bubble_log('next_layer: TLS passthru for ' + repr(next_layer.server_conn.address))
            bubble_activity_log(client_address, server_address, 'tls_passthru', passthru['fqdn'])
            next_layer_replacement = RawTCPLayer(next_layer.ctx, ignore=True)
            next_layer.reply.send(next_layer_replacement)
        else:
            bubble_activity_log(client_address, server_address, 'tls_intercept', passthru['fqdn'])
            next_layer.__class__ = TlsFeedback
@@ -1,148 +0,0 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
import re | |||
import time | |||
import uuid | |||
from bubble_api import bubble_matchers, bubble_log, bubble_activity_log, CTX_BUBBLE_MATCHERS, BUBBLE_URI_PREFIX, CTX_BUBBLE_ABORT, CTX_BUBBLE_PASSTHRU, CTX_BUBBLE_REQUEST_ID, add_flow_ctx | |||
from bubble_config import bubble_host, bubble_host_alias | |||
# This regex extracts splits the host header into host and port. | |||
# Handles the edge case of IPv6 addresses containing colons. | |||
# https://bugzilla.mozilla.org/show_bug.cgi?id=45891 | |||
parse_host_header = re.compile(r"^(?P<host>[^:]+|\[.+\])(?::(?P<port>\d+))?$") | |||
class Rerouter:
    """mitmproxy addon: per-request routing and filtering decisions.

    For each request, asks the bubble API (via bubble_matchers) which matcher
    rules apply, records the decision in the flow context for later hooks
    (responseheaders), and rewrites the flow's host/port from SNI or the
    Host header.
    """
    @staticmethod
    def get_matchers(flow, host):
        """Return {'decision', 'matchers', 'request_id'} for this request, or None
        when the request should not be matched (bubble paths, bubble itself,
        undecodable host, or API failure)."""
        if host is None:
            return None
        if flow.request.path and flow.request.path.startswith(BUBBLE_URI_PREFIX):
            bubble_log("get_matchers: not filtering special bubble path: "+flow.request.path)
            return None
        remote_addr = str(flow.client_conn.address[0])
        # host may arrive as bytes (SNI) or str (Host header)
        try:
            host = host.decode()
        except (UnicodeDecodeError, AttributeError):
            try:
                host = str(host)
            except Exception as e:
                bubble_log('get_matchers: host '+repr(host)+' could not be decoded, type='+str(type(host))+' e='+repr(e))
                return None
        if host == bubble_host or host == bubble_host_alias:
            bubble_log('get_matchers: request is for bubble itself ('+host+'), not matching')
            return None
        # unique request id: host + uuid + timestamp
        req_id = str(host) + '.' + str(uuid.uuid4()) + '.' + str(time.time())
        bubble_log("get_matchers: requesting match decision for req_id="+req_id)
        resp = bubble_matchers(req_id, remote_addr, flow, host)
        if not resp:
            bubble_log('get_matchers: no response for remote_addr/host: '+remote_addr+'/'+str(host))
            return None
        # keep only matchers whose urlRegex matches the request path
        matchers = []
        if 'matchers' in resp and resp['matchers'] is not None:
            for m in resp['matchers']:
                if 'urlRegex' in m:
                    bubble_log('get_matchers: checking for match of path='+flow.request.path+' against regex: '+m['urlRegex'])
                else:
                    bubble_log('get_matchers: checking for match of path='+flow.request.path+' -- NO regex, skipping')
                    continue
                if re.match(m['urlRegex'], flow.request.path):
                    bubble_log('get_matchers: rule matched, adding rule: '+m['rule'])
                    matchers.append(m)
                else:
                    bubble_log('get_matchers: rule (regex='+m['urlRegex']+') did NOT match, skipping rule: '+m['rule'])
        else:
            bubble_log('get_matchers: no matchers. response='+repr(resp))
        decision = None
        if 'decision' in resp:
            decision = resp['decision']
        matcher_response = { 'decision': decision, 'matchers': matchers, 'request_id': req_id }
        bubble_log("get_matchers: returning "+repr(matcher_response))
        return matcher_response

    def request(self, flow):
        """mitmproxy request hook: decide passthru/abort/filter and fix up host/port."""
        client_address = flow.client_conn.address[0]
        server_address = flow.server_conn.address[0]
        # derive scheme/SNI/port from whether the client connection is TLS
        if flow.client_conn.tls_established:
            flow.request.scheme = "https"
            sni = flow.client_conn.connection.get_servername()
            port = 443
        else:
            flow.request.scheme = "http"
            sni = None
            port = 80
        host_header = flow.request.host_header
        # bubble_log("dns_spoofing.request: host_header is "+repr(host_header))
        if host_header:
            m = parse_host_header.match(host_header)
            if m:
                host_header = m.group("host").strip("[]")
                if m.group("port"):
                    port = int(m.group("port"))
        # Determine if this request should be filtered
        if sni or host_header:
            host = str(sni or host_header)
            # strip the textual bytes-literal wrapper, e.g. "b'example.com'"
            if host.startswith("b'"):
                host = host[2:-1]
            log_url = flow.request.scheme + '://' + host + flow.request.path
            matcher_response = self.get_matchers(flow, sni or host_header)
            if matcher_response:
                if 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'] == 'passthru':
                    bubble_log('dns_spoofing.request: passthru response returned, passing thru and NOT performing TLS interception...')
                    add_flow_ctx(flow, CTX_BUBBLE_PASSTHRU, True)
                    bubble_activity_log(client_address, server_address, 'http_passthru', log_url)
                    return
                elif 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'].startswith('abort_'):
                    bubble_log('dns_spoofing.request: found abort code: ' + str(matcher_response['decision']) + ', aborting')
                    if matcher_response['decision'] == 'abort_ok':
                        abort_code = 200
                    elif matcher_response['decision'] == 'abort_not_found':
                        abort_code = 404
                    else:
                        bubble_log('dns_spoofing.request: unknown abort code: ' + str(matcher_response['decision']) + ', aborting with 404 Not Found')
                        abort_code = 404
                    add_flow_ctx(flow, CTX_BUBBLE_ABORT, abort_code)
                    bubble_activity_log(client_address, server_address, 'http_abort' + str(abort_code), log_url)
                    return
                elif 'decision' in matcher_response and matcher_response['decision'] is not None and matcher_response['decision'] == 'no_match':
                    bubble_log('dns_spoofing.request: decision was no_match, passing thru...')
                    bubble_activity_log(client_address, server_address, 'http_no_match', log_url)
                    return
                elif ('matchers' in matcher_response
                      and 'request_id' in matcher_response
                      and len(matcher_response['matchers']) > 0):
                    # matchers found: stash them for the responseheaders hook
                    req_id = matcher_response['request_id']
                    bubble_log("dns_spoofing.request: found request_id: " + req_id + ' with matchers: ' + repr(matcher_response['matchers']))
                    add_flow_ctx(flow, CTX_BUBBLE_MATCHERS, matcher_response['matchers'])
                    add_flow_ctx(flow, CTX_BUBBLE_REQUEST_ID, req_id)
                    bubble_activity_log(client_address, server_address, 'http_match', log_url)
                else:
                    bubble_log('dns_spoofing.request: no rules returned, passing thru...')
                    bubble_activity_log(client_address, server_address, 'http_no_rules', log_url)
            else:
                bubble_log('dns_spoofing.request: no matcher_response returned, passing thru...')
                # bubble_activity_log(client_address, server_address, 'http_no_matcher_response', log_url)
        else:
            bubble_log('dns_spoofing.request: no sni/host found, not applying rules to path: ' + flow.request.path)
            bubble_activity_log(client_address, server_address, 'http_no_sni_or_host', 'n/a')
        # rewrite the flow target from what we parsed above
        flow.request.host_header = host_header
        flow.request.host = sni or host_header
        flow.request.port = port
addons = [Rerouter()] |
@@ -1,34 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Install a certificate into the system CA trust store.
# Usage: install_cert.sh <cert-file> [timeout-seconds]
#   Waits up to timeout-seconds (default 0) for the cert file to appear,
#   converts .pem/.p12 files to .crt, then installs the cert and refreshes
#   the CA bundle via update-ca-certificates.
CERT="${1:?no cert provided}"
TIMEOUT=${2:-0}
function die {
  echo 1>&2 "${1}"
  exit 1
}
START=$(date +%s)
while [[ ! -f "${CERT}" ]] ; do
  # shell arithmetic instead of the legacy `expr` utility
  ELAPSED=$(( $(date +%s) - START ))
  if [[ ${ELAPSED} -gt ${TIMEOUT} ]] ; then
    break
  fi
  echo "Cert file does not exist, sleeping then rechecking: ${CERT}"
  sleep 5s
done
if [[ ! -f "${CERT}" ]] ; then
  die "Cert file does not exist: ${CERT}"
fi
# convert PEM-format certs to the .crt form expected by update-ca-certificates
if [[ "${CERT}" == *.pem || "${CERT}" == *.p12 ]] ; then
  openssl x509 -in "${CERT}" -inform PEM -out "${CERT}.crt" || die "Error converting certificate"
  CERT="${CERT}.crt"
fi
mkdir -p /usr/local/share/ca-certificates || die "Error ensuring CA certs directory exists"
cp "${CERT}" /usr/local/share/ca-certificates || die "Error installing certificate"
update-ca-certificates || die "Error updating CA certificates"
@@ -1,80 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Monitors two things in a loop:
#  1. The bubble-owned marker file: when its on/off value changes, enables or
#     disables the iptables redirects that send ports 80/443 to mitmdump.
#  2. mitmdump memory usage: restarts mitmdump via supervisorctl when it
#     exceeds MAX_MITM_PCT_MEM percent of system memory.
LOG=/tmp/bubble.mitmdump_monitor.log
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
function log {
  echo "$(date): ${1}" >> ${LOG}
}
BUBBLE_MITM_MARKER=/home/bubble/.mitmdump_monitor   # written by the bubble user (on/off)
ROOT_KEY_MARKER=/usr/share/bubble/mitmdump_monitor  # root's record of the applied state
MITMDUMP_PID_FILE=/home/mitmproxy/mitmdump.pid
MAX_MITM_PCT_MEM=18
# Start with MITM proxy turned off
if [[ ! -f ${BUBBLE_MITM_MARKER} ]] ; then
  # fixed: failure message previously named ${ROOT_KEY_MARKER} though this writes ${BUBBLE_MITM_MARKER}
  echo -n off > ${BUBBLE_MITM_MARKER} && chown bubble ${BUBBLE_MITM_MARKER} || log "Error writing 'off' to ${BUBBLE_MITM_MARKER}"
fi
if [[ ! -f ${ROOT_KEY_MARKER} ]] ; then
  sleep 1s
  mkdir -p "$(dirname ${ROOT_KEY_MARKER})" && chmod 755 "$(dirname ${ROOT_KEY_MARKER})" || log "Error creating or setting permissions on ${ROOT_KEY_MARKER}"
  echo -n on > ${ROOT_KEY_MARKER} && touch ${ROOT_KEY_MARKER} && chmod 644 ${ROOT_KEY_MARKER} || log "Error writing 'on' to ${ROOT_KEY_MARKER}"
fi
function ensureMitmOn {
  log "Flushing PREROUTING before enabling MITM services"
  iptables -F PREROUTING -t nat || log "Error flushing port forwarding when enabling MITM services"
  log "Enabling MITM port forwarding on TCP port 80 -> 8888"
  iptables -I PREROUTING 1 -t nat -p tcp --dport 80 -j REDIRECT --to-ports 8888 || log "Error enabling MITM port forwarding 80 -> 8888"
  log "Enabling MITM port forwarding on TCP port 443 -> 8888"
  iptables -I PREROUTING 1 -t nat -p tcp --dport 443 -j REDIRECT --to-ports 8888 || log "Error enabling MITM port forwarding 443 -> 8888"
  echo -n on > ${ROOT_KEY_MARKER}
}
function ensureMitmOff {
  log "Flushing PREROUTING to disable MITM services"
  iptables -F PREROUTING -t nat || log "Error flushing port forwarding when disabling MITM services"
  echo -n off > ${ROOT_KEY_MARKER} || log "Error writing 'off' to ${ROOT_KEY_MARKER}"
}
log "Watching marker file ${BUBBLE_MITM_MARKER} ..."
sleep 2s && touch ${BUBBLE_MITM_MARKER} || log "Error touching ${BUBBLE_MITM_MARKER}" # first time through, always check and set on/off state
while : ; do
  # only act when the bubble marker is newer than root's record AND contents differ
  if [[ $(stat -c %Y ${BUBBLE_MITM_MARKER}) -gt $(stat -c %Y ${ROOT_KEY_MARKER}) ]] ; then
    if [[ ! -z "$(cmp -b ${ROOT_KEY_MARKER} ${BUBBLE_MITM_MARKER})" ]] ; then
      if [[ "$(cat ${BUBBLE_MITM_MARKER} | tr -d [[:space:]])" == "on" ]] ; then
        ensureMitmOn
      elif [[ "$(cat ${BUBBLE_MITM_MARKER} | tr -d [[:space:]])" == "off" ]] ; then
        ensureMitmOff
      else
        log "Error: marker file ${BUBBLE_MITM_MARKER} contained invalid value: $(cat ${BUBBLE_MITM_MARKER} | head -c 5)"
      fi
    fi
  fi
  # Check process memory usage, restart mitmdump if memory goes above max % allowed
  if [[ -f ${MITMDUMP_PID_FILE} && -s ${MITMDUMP_PID_FILE} ]] ; then
    MITM_PID="$(cat ${MITMDUMP_PID_FILE})"
    PCT_MEM="$(ps q ${MITM_PID} -o %mem --no-headers | tr -d [[:space:]] | cut -f1 -d. | sed 's/[^0-9]*//g')"
    # log "Info: mitmdump pid ${MITM_PID} using ${PCT_MEM}% of memory"
    if [[ ! -z "${PCT_MEM}" ]] ; then
      if [[ ${PCT_MEM} -ge ${MAX_MITM_PCT_MEM} ]] ; then
        log "Warn: mitmdump: pid=$(cat ${MITMDUMP_PID_FILE}) memory used > max, restarting: ${PCT_MEM}% >= ${MAX_MITM_PCT_MEM}%"
        supervisorctl restart mitmdump
      fi
    else
      log "Error: could not determine mitmdump % memory, maybe PID file ${MITMDUMP_PID_FILE} is out of date? pid found was ${MITM_PID}"
    fi
  else
    log "Error: mitmdump PID file ${MITMDUMP_PID_FILE} not found or empty"
  fi
  sleep 5s
done
@@ -1,30 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Restore backed-up mitmproxy CA certificates from the bubble home directory
# into /home/mitmproxy/.mitmproxy, replacing any existing certs, then reset
# ownership (mitmproxy:root) and permissions. Exits 0 silently when there is
# no backup to restore.
function die {
  echo 1>&2 "${1}"
  exit 1
}
CERTS_BACKUP=/home/bubble/mitm_certs
if [[ ! -d ${CERTS_BACKUP} ]] ; then
  echo "No mitm_certs backup found, skipping restore"
  exit 0
fi
MITM_CERTS=/home/mitmproxy/.mitmproxy
# remove any existing cert dir entirely before restoring
if [[ -d ${MITM_CERTS} ]] ; then
  echo "Removing obsolete mitm certs: ${MITM_CERTS}"
  rm -rf ${MITM_CERTS} || die "Error removing obsolete mitm certs"
  if [[ -d ${MITM_CERTS} ]] ; then
    die "Error removing obsolete mitm certs: dir still exists: ${MITM_CERTS}"
  fi
fi
mkdir -p ${MITM_CERTS} || die "Error creating mitm certs dir: ${MITM_CERTS}"
chmod 750 ${MITM_CERTS} || die "Error setting permissions on mitm certs dir: ${MITM_CERTS}"
cp -R ${CERTS_BACKUP}/* ${MITM_CERTS}/ || die "Error restoring mitm certs"
chown -R mitmproxy ${MITM_CERTS} || die "Error changing ownership of ${MITM_CERTS}"
chgrp -R root ${MITM_CERTS} || die "Error changing group ownership of ${MITM_CERTS}"
chmod 440 ${MITM_CERTS}/* || die "Error setting permissions on mitm certs files"
@@ -1,22 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Launch mitmdump as a transparent proxy on the given port, inside the
# project virtualenv, with the bubble addon scripts loaded.
MITM_PORT="${1:?no port provided}"

# dev.sh prepares the venv; activate it, then run mitmdump in the foreground
# (supervisor manages the process lifecycle)
cd /home/mitmproxy/mitmproxy && \
  ./dev.sh && . ./venv/bin/activate && \
  mitmdump \
    --listen-host 0.0.0.0 \
    --listen-port "${MITM_PORT}" \
    --showhost \
    --no-http2 \
    --set block_global=true \
    --set block_private=false \
    --set termlog_verbosity=debug \
    --set flow_detail=3 \
    --set stream_large_bodies=5m \
    --set keep_host_header \
    -s ./dns_spoofing.py \
    -s ./bubble_passthru.py \
    -s ./bubble_modify.py \
    --mode transparent
@@ -1,23 +0,0 @@ | |||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Ensure mitmproxy's options.py uses CERT_NAME as its CONF_BASENAME,
# rewriting the file in place (via a temp file) only when needed.
MITM_DIR=${1:?no mitm dir specified}
CERT_NAME=${2:?no cert name specified}

if [[ ! -d "${MITM_DIR}" ]] ; then
  echo "mitm dir does not exist or is not a directory: ${MITM_DIR}"
  exit 1
fi

OPTIONS_FILE="${MITM_DIR}/mitmproxy/options.py"
if [[ ! -f "${OPTIONS_FILE}" ]] ; then
  echo "options.py not found in mitm dir: ${MITM_DIR}"
  exit 1
fi

# Only rewrite when the CONF_BASENAME line does not already contain CERT_NAME.
# grep -E replaces the deprecated egrep; grep -q replaces the cat|...|wc -l pipeline.
if ! grep -E '^CONF_BASENAME =' "${OPTIONS_FILE}" | grep -q "${CERT_NAME}" ; then
  temp="$(mktemp /tmp/options.py.XXXXXXX)"
  sed -e 's/^CONF_BASENAME\s*=.*/CONF_BASENAME = "'"${CERT_NAME}"'"/' "${OPTIONS_FILE}" > "${temp}"
  mv "${temp}" "${OPTIONS_FILE}"
fi
@@ -1,5 +0,0 @@ | |||
; Supervisor program definition for the mitmdump watchdog.
; Output is discarded; mitmdump_monitor.sh does its own logging.
[program:mitmdump_monitor]
stdout_logfile = /dev/null
stderr_logfile = /dev/null
command=/usr/local/sbin/mitmdump_monitor.sh
@@ -1,36 +1,6 @@ | |||
# | |||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||
# | |||
# Build/runtime packages needed to set up mitmproxy in a virtualenv
- name: Install python3, pip, virtualenv and required dependencies
  apt:
    name: [ 'python3-pip', 'python3-venv', 'libc6-dev', 'libpython3-dev', 'g++', 'libffi-dev' ]
    state: present
    update_cache: yes

# Rendered from a Jinja2 template so variables (e.g. mitm_port) can be substituted
- name: Install supervisor conf file
  template:
    src: supervisor_mitmproxy.conf.j2
    dest: /etc/supervisor/conf.d/mitmproxy.conf
    owner: root
    group: root
    mode: 0400

# Dedicated system user that mitmdump runs as
- name: Create mitmproxy user
  user:
    name: mitmproxy
    comment: mitmdump user
    shell: /bin/bash
    system: yes
    home: /home/mitmproxy

- name: Creates mitmproxy dir
  file:
    path: /home/mitmproxy/mitmproxy
    owner: mitmproxy
    group: mitmproxy
    mode: 0755
    state: directory

# Point mitmproxy's CONF_BASENAME at this server's alias (see set_cert_name.sh)
- name: Set the cert name
  shell: set_cert_name.sh /home/mitmproxy/mitmproxy {{ server_alias }}
@@ -1,7 +0,0 @@ | |||
; Supervisor program definition for mitmdump (templated: mitm_port).
; Runs as the mitmproxy user; stopasgroup + QUIT ensures the child
; mitmdump process is stopped along with the wrapper shell.
[program:mitmdump]
stdout_logfile = /home/mitmproxy/mitmdump-out.log
stderr_logfile = /home/mitmproxy/mitmdump-err.log
command=sudo -H -u mitmproxy bash -c "/home/mitmproxy/mitmproxy/run_mitmdump.sh {{ mitm_port }}"
stopasgroup=true
stopsignal=QUIT
@@ -327,9 +327,7 @@ meter_tick_validating_node_network_and_plan=Verifying settings for Bubble | |||
meter_tick_creating_node=Creating Bubble node | |||
meter_tick_launching_node=Launching Bubble node | |||
meter_tick_preparing_roles=Preparing installation parameters | |||
meter_tick_writing_dns_records=Writing DNS records | |||
meter_tick_preparing_install=Creating installation package | |||
meter_tick_awaiting_dns=Awaiting DNS visibility | |||
meter_tick_starting_install=Connecting to node to install Bubble | |||
meter_tick_copying_ansible=Copying files required to install Bubble | |||
meter_tick_running_ansible=Starting Bubble installation | |||
@@ -369,8 +367,11 @@ meter_error_plan_not_enabled=Account plan is not enabled, cannot launch Bubble | |||
meter_error_node_cloud_not_found=Compute cloud was not found, cannot launch Bubble | |||
meter_error_bubble_jar_not_found=Bubble jar file was not found, cannot launch Bubble | |||
meter_error_roles_not_found=Ansible roles were not found, cannot launch Bubble | |||
meter_error_no_ip_or_ssh_key=Bubble node started, but does not have an IP address or SSH key, cannot install Bubble | |||
meter_error_starting_node=Error starting node | |||
meter_error_dns=Error setting DNS entries for node | |||
meter_error_no_ip=Bubble node started, but does not have an IP address, cannot install Bubble
meter_error_role_validation_errors=Validation of ansible roles failed, cannot install Bubble | |||
meter_start_or_dns_error=Error starting node or writing DNS records | |||
# Launch progress meter: catch-all for unknown/unmapped errors | |||
meter_unknown_error=An unknown error occurred | |||
@@ -395,7 +396,6 @@ msg_node_state_starting=starting | |||
msg_node_state_booting=booting | |||
msg_node_state_booted=booted | |||
msg_node_state_preparing_install=preparing installation | |||
msg_node_state_awaiting_dns=waiting for DNS to be visible | |||
msg_node_state_installing=installing | |||
msg_node_state_running=running | |||
msg_node_state_stopping=stopping | |||
@@ -0,0 +1,6 @@ | |||
#!/bin/bash
# Create a python3 virtualenv (.env) inside the algo role directory and
# install algo's pip requirements into it. The && chain aborts on the first
# failing step and returns its non-zero exit status to the caller.
cd /root/ansible/roles/algo/algo \
  && python3 -m virtualenv --python="$(command -v python3)" .env \
  && source .env/bin/activate \
  && python3 -m pip install -U pip virtualenv \
  && python3 -m pip install -r requirements.txt
@@ -185,3 +185,8 @@ fail_hint: | |||
booleans_map: | |||
Y: true | |||
y: true | |||
# Always set provider as local | |||
algo_provider: local | |||
algo_server_name: localhost |
@@ -1,7 +1,19 @@ | |||
# From algo/playbooks/cloud-pre.yml
- name: Write algo_venv.sh
  copy:
    src: algo_venv.sh
    dest: /root/ansible/roles/algo/algo
    owner: root
    group: root
    mode: 0700

- name: Install algo pip packages
  shell: bash -c /root/ansible/roles/algo/algo/algo_venv.sh

# From algo/roles/common/tasks/unattended-upgrades.yml
# (unattended-upgrades is folded into the algo apt package list below;
# the stale duplicate "Install unattended-upgrades" task name and the old
# apt name list were merge leftovers that made this task an invalid mapping)
- name: Install algo apt packages
  apt:
    name: [ 'git', 'apparmor-utils', 'uuid-runtime', 'coreutils', 'cgroup-tools', 'openssl', 'gnupg2', 'linux-headers-generic', 'unattended-upgrades' ]
    state: present
    update_cache: yes
@@ -13,7 +13,7 @@ | |||
get_url: | |||
url: https://github.com/getbubblenow/bubble-dist/raw/master/algo/master.zip | |||
dest: /tmp/algo.zip | |||
checksum: sha256:26d4e220e4527a6e7efd997be93c2c7764f7d8b09ea826268dad0371f802cdbb | |||
checksum: sha256:aaf25966bf9d6ad040db85b1b3f69c93a5be11a15537a240f036ef1c00d9e2df | |||
- name: Unzip algo master.zip | |||
unarchive: | |||
@@ -79,6 +79,14 @@ function count_table_rows { | |||
echo ${num_rows} | |||
} | |||
# INIT mode: when invoked with INIT as arg 3, seed the source encryption key
# (arg 4) for a fresh system -- only if the database does not yet exist --
# then exit without doing any other work.
if [[ "${3}" == "INIT" ]] ; then
  [[ $(db_exists ${DB_NAME}) -eq 0 ]] && echo -n "${4}" > /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY
  exit 0
fi
if [[ ! -z "${DROP_AND_RECREATE}" && "${DROP_AND_RECREATE}" == "drop" ]] ; then | |||
dropdb ${DB_NAME} || echo "error dropping DB ${DB_NAME} (will continue)" | |||
dropuser ${DB_USER} || echo "error dropping DB user ${DB_USER} (will continue)" | |||
@@ -7,14 +7,6 @@ | |||
state: present | |||
update_cache: yes | |||
# Static supervisor config (plain copy -- no template variables needed)
- name: Install supervisor conf file
  copy:
    src: supervisor_mitmproxy.conf
    dest: /etc/supervisor/conf.d/mitmproxy.conf
    owner: root
    group: root
    mode: 0400
- name: Create mitmproxy user | |||
user: | |||
name: mitmproxy | |||
@@ -82,6 +74,9 @@ | |||
- import_tasks: route.yml | |||
# Recursive chown via shell; the ansible file module would need recurse: yes
- name: Ensure mitmproxy user owns all mitmproxy files
  shell: chown -R mitmproxy /home/mitmproxy/mitmproxy
- name: Install mitmdump_monitor | |||
copy: | |||
src: "mitmdump_monitor.sh" | |||
@@ -90,6 +85,14 @@ | |||
group: root | |||
mode: 0500 | |||
# Static supervisor config for mitmproxy (plain copy, root-only readable)
- name: Install supervisor conf file
  copy:
    src: supervisor_mitmproxy.conf
    dest: /etc/supervisor/conf.d/mitmproxy.conf
    owner: root
    group: root
    mode: 0400
- name: Install mitmdump_monitor supervisor conf file | |||
copy: | |||
src: supervisor_mitmdump_monitor.conf | |||