@@ -5,12 +5,9 @@ | |||||
# | # | ||||
# Prepares the bubble.jar file for active usage. | # Prepares the bubble.jar file for active usage. | ||||
# | # | ||||
# 1. Update role JSON in bubble-server/src/main/resources/ansible/default_roles.json | |||||
# Inserts "tgzB64" value with file://path to tarball | |||||
# 1. Copy scripts to bubble-server/target/classes/scripts | |||||
# | # | ||||
# 2. Copy scripts to bubble-server/target/classes/scripts | |||||
# | |||||
# 3. If the environment variable INSTALL_WEB is equal to "web", also build and install the bubble-web | |||||
# 2. If the environment variable INSTALL_WEB is equal to "web", also build and install the bubble-web | |||||
# site to bubble-server/target/classes/site | # site to bubble-server/target/classes/site | ||||
# | # | ||||
# Usage: | # Usage: | ||||
@@ -33,88 +30,17 @@ fi | |||||
BUBBLE_SERVER="$(cd "${SCRIPT_DIR}/../bubble-server" && pwd)" | BUBBLE_SERVER="$(cd "${SCRIPT_DIR}/../bubble-server" && pwd)" | ||||
CLASSES_DIR="${BUBBLE_SERVER}/target/classes" | CLASSES_DIR="${BUBBLE_SERVER}/target/classes" | ||||
DEFAULT_ROLES_RELATIVE="ansible/default_roles.json" | |||||
DEFAULT_ROLES="${CLASSES_DIR}/${DEFAULT_ROLES_RELATIVE}" | |||||
if [[ ! -f ${DEFAULT_ROLES} ]] ; then | |||||
die "default roles file not found: ${DEFAULT_ROLES}" | |||||
fi | |||||
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then | |||||
for f in "${HOME}/bubble/current/bubble.env" "${HOME}/.bubble.env" ; do | |||||
if [[ -f "${f}" ]] ; then | |||||
LOCALSTORAGE_BASE_DIR=$(cat ${f} | grep -v '^#' | grep LOCALSTORAGE_BASE_DIR | awk -F '=' '{print $2}' | tr -d ' ') | |||||
break | |||||
fi | |||||
done | |||||
fi | |||||
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then | |||||
echo "Warning: LOCALSTORAGE_BASE_DIR env var not defined and no bubble.env found, using ${HOME}/.bubble_local_storage" | |||||
LOCALSTORAGE_BASE_DIR="${HOME}/.bubble_local_storage" | |||||
fi | |||||
if [[ -z "${BUBBLE_JAR}" ]] ; then | if [[ -z "${BUBBLE_JAR}" ]] ; then | ||||
die "bubble jar not found: ${BUBBLE_JAR}" | die "bubble jar not found: ${BUBBLE_JAR}" | ||||
fi | fi | ||||
ROLES_DIR="$(cd "${SCRIPT_DIR}/../automation/roles" && pwd)" | |||||
if [[ ! -d ${ROLES_DIR} ]] ; then | |||||
die "automation/roles dir not found: ${ROLES_DIR}" | |||||
fi | |||||
LOCAL_NET_ID="$("${SCRIPT_DIR}/bconst" bubble.ApiConstants.ROOT_NETWORK_UUID 2> /dev/null)" | |||||
if [[ -z "${LOCAL_NET_ID}" ]] ; then | |||||
# try to read from source file | |||||
LOCAL_NET_ID="$(cat "${BUBBLE_SERVER}/src/main/java/bubble/ApiConstants.java" | grep -v '//' | egrep '\s+String\s+ROOT_NETWORK_UUID' | awk -F '"' '{print $2}')" | |||||
if [[ -z "${LOCAL_NET_ID}" ]] ; then | |||||
die "ROOT_NETWORK_UUID could not be read from ApiConstants" | |||||
fi | |||||
fi | |||||
echo "lbs = ${LOCALSTORAGE_BASE_DIR}" | |||||
UPDATED="$(mktemp /tmp/default_roles.XXXXXXX.json)" | |||||
cd ${ROLES_DIR} | |||||
echo "[" > "${UPDATED}" | |||||
for role in $(ls -1) ; do | |||||
echo "Processing role: ${role}" | |||||
ROLE_JSON="${role}/files/bubble_role.json" | |||||
if [[ ! -f "${ROLE_JSON}" ]] ; then | |||||
die "Json file not found for role ${role}: ${ROLE_JSON}" | |||||
fi | |||||
if [[ $(cat ${UPDATED} | wc -c) -gt 2 ]] ; then | |||||
echo "," >> ${UPDATED} | |||||
fi | |||||
role_name="$(cat "${ROLE_JSON}" | jq -r .name)" | |||||
role_path="automation/roles/${role_name}.tgz" | |||||
TGZ_PATH="${LOCALSTORAGE_BASE_DIR}/${role_path}" | |||||
mkdir -p $(dirname ${TGZ_PATH}) || die "Error creating parent dir for ${TGZ_PATH}" | |||||
tar czf ${TGZ_PATH} ${role} | |||||
cat ${ROLE_JSON} | jq --arg tgzB64 "storage://LocalStorage/${role_path}" '. + {tgzB64: $tgzB64}' >> ${UPDATED} | |||||
echo "------------------------------" | |||||
echo "Generated role JSON: ${role}" | |||||
echo "------------------------------" | |||||
done | |||||
echo "]" >> ${UPDATED} | |||||
jq . < ${UPDATED} > ${DEFAULT_ROLES} || die "Error writing ${DEFAULT_ROLES}, maybe some problems with ${UPDATED} ?" | |||||
echo "------------------------------------------------------------" | |||||
cat "${UPDATED}" | |||||
echo "------------------------------------------------------------" | |||||
cd ${LOCALSTORAGE_BASE_DIR} && jar uvf ${BUBBLE_JAR} automation || die "Error updating ${BUBBLE_JAR} with default role archives" | |||||
mkdir -p ${LOCALSTORAGE_BASE_DIR}/${LOCAL_NET_ID} && cp -R ${LOCALSTORAGE_BASE_DIR}/automation ${LOCALSTORAGE_BASE_DIR}/${LOCAL_NET_ID}/ || die "Error creating/copying network storage dir: ${LOCALSTORAGE_BASE_DIR}/${LOCAL_NET_ID}" | |||||
mkdir -p ${CLASSES_DIR}/scripts | mkdir -p ${CLASSES_DIR}/scripts | ||||
for script in $(cat ${BUBBLE_SERVER}/src/main/resources/ansible/bubble_scripts.txt) ; do | for script in $(cat ${BUBBLE_SERVER}/src/main/resources/ansible/bubble_scripts.txt) ; do | ||||
cp ${SCRIPT_DIR}/${script} ${CLASSES_DIR}/scripts || die "Error copying ${SCRIPT_DIR}/${script} -> ${CLASSES_DIR}/scripts" | cp ${SCRIPT_DIR}/${script} ${CLASSES_DIR}/scripts || die "Error copying ${SCRIPT_DIR}/${script} -> ${CLASSES_DIR}/scripts" | ||||
done | done | ||||
cd ${CLASSES_DIR} && jar uvf ${BUBBLE_JAR} scripts ${DEFAULT_ROLES_RELATIVE} || die "Error updating ${BUBBLE_JAR} with scripts" | |||||
echo "Updated $(ls -1 ${ROLES_DIR} | wc -l) roles in ${DEFAULT_ROLES}" | |||||
cd ${CLASSES_DIR} && jar uvf ${BUBBLE_JAR} scripts || die "Error updating ${BUBBLE_JAR} with scripts" | |||||
rm -f "${UPDATED}" | rm -f "${UPDATED}" | ||||
@@ -101,7 +101,7 @@ else | |||||
fi | fi | ||||
if [[ -z "${BUBBLE_JAR}" ]] ; then | if [[ -z "${BUBBLE_JAR}" ]] ; then | ||||
die "API jar file not found in ${BASE}/target" | |||||
die "API jar file not found" | |||||
fi | fi | ||||
if [[ -z "${BUBBLE_JVM_OPTS}" ]] ; then | if [[ -z "${BUBBLE_JVM_OPTS}" ]] ; then | ||||
@@ -41,6 +41,9 @@ public class ApiConstants { | |||||
public static final String DEFAULT_LOCALE = "en_US"; | public static final String DEFAULT_LOCALE = "en_US"; | ||||
public static final String[] ROLES_SAGE = {"common", "nginx", "bubble", "bubble_finalizer"}; | |||||
public static final String[] ROLES_NODE = {"common", "nginx", "algo", "mitmproxy", "bubble", "bubble_finalizer"}; | |||||
private static final AtomicReference<String> bubbleDefaultDomain = new AtomicReference<>(); | private static final AtomicReference<String> bubbleDefaultDomain = new AtomicReference<>(); | ||||
public static final ObjectMapper DB_JSON_MAPPER = COMPACT_MAPPER; | public static final ObjectMapper DB_JSON_MAPPER = COMPACT_MAPPER; | ||||
@@ -0,0 +1,52 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.cloud.compute; | |||||
import com.fasterxml.jackson.annotation.JsonIgnore; | |||||
import lombok.Getter; | |||||
import lombok.NoArgsConstructor; | |||||
import lombok.Setter; | |||||
import lombok.experimental.Accessors; | |||||
import org.cobbzilla.util.collection.NameAndValue; | |||||
import org.cobbzilla.wizard.model.entityconfig.annotations.ECField; | |||||
import org.cobbzilla.wizard.model.entityconfig.annotations.ECSearchable; | |||||
import javax.persistence.Column; | |||||
import javax.persistence.Transient; | |||||
import javax.validation.constraints.Size; | |||||
import java.util.Arrays; | |||||
import static bubble.ApiConstants.DB_JSON_MAPPER; | |||||
import static org.cobbzilla.util.json.JsonUtil.json; | |||||
@NoArgsConstructor @Accessors(chain=true) | |||||
public class AnsibleRole { | |||||
@Getter @Setter private String name; | |||||
@ECSearchable(filter=true) @ECField(index=30) | |||||
@Size(max=10000, message="err.description.length") | |||||
@Getter @Setter private String description; | |||||
@Column(updatable=false, length=10000) | |||||
@JsonIgnore @Getter @Setter private String configJson; | |||||
public boolean hasConfig () { return configJson != null; } | |||||
@Transient public NameAndValue[] getConfig () { return configJson == null ? null : json(configJson, NameAndValue[].class); } | |||||
public AnsibleRole setConfig(NameAndValue[] config) { return setConfigJson(config == null ? null : json(config, DB_JSON_MAPPER)); } | |||||
@Column(updatable=false, length=1000) @ECField(index=80) | |||||
@JsonIgnore @Getter @Setter private String optionalConfigNamesJson; | |||||
public boolean hasOptionalConfigNames () { return optionalConfigNamesJson != null; } | |||||
@Transient public String[] getOptionalConfigNames() { return optionalConfigNamesJson == null ? null : json(optionalConfigNamesJson, String[].class); } | |||||
public AnsibleRole setOptionalConfigNames(String[] names) { return setOptionalConfigNamesJson(name == null ? null : json(names, DB_JSON_MAPPER)); } | |||||
public boolean isOptionalConfigName(String cfgName) { | |||||
final String[] names = getOptionalConfigNames(); | |||||
if (names == null) return false; | |||||
return Arrays.asList(names).contains(cfgName); | |||||
} | |||||
} |
@@ -14,6 +14,7 @@ public class ComputeConfig extends RegionalConfig { | |||||
@Getter @Setter private ComputeNodeSize[] sizes; | @Getter @Setter private ComputeNodeSize[] sizes; | ||||
@Getter @Setter private NameAndValue[] config; | @Getter @Setter private NameAndValue[] config; | ||||
@Getter @Setter private PackerConfig packer; | |||||
public CloudRegion getRegion (String name) { | public CloudRegion getRegion (String name) { | ||||
for (CloudRegion r : getRegions()) { | for (CloudRegion r : getRegions()) { | ||||
@@ -0,0 +1,13 @@ | |||||
package bubble.cloud.compute; | |||||
import com.fasterxml.jackson.databind.JsonNode; | |||||
import lombok.Getter; | |||||
import lombok.Setter; | |||||
import org.cobbzilla.util.collection.NameAndValue; | |||||
public class PackerConfig { | |||||
@Getter @Setter private NameAndValue[] vars; | |||||
@Getter @Setter private JsonNode builder; | |||||
} |
@@ -27,7 +27,6 @@ import java.util.Arrays; | |||||
import java.util.List; | import java.util.List; | ||||
import static bubble.ApiConstants.ROOT_NETWORK_UUID; | import static bubble.ApiConstants.ROOT_NETWORK_UUID; | ||||
import static bubble.dao.cloud.AnsibleRoleDAO.ROLE_PATH; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.*; | import static org.cobbzilla.util.daemon.ZillaRuntime.*; | ||||
import static org.cobbzilla.util.io.FileUtil.*; | import static org.cobbzilla.util.io.FileUtil.*; | ||||
import static org.cobbzilla.util.json.JsonUtil.json; | import static org.cobbzilla.util.json.JsonUtil.json; | ||||
@@ -77,9 +76,6 @@ public class LocalStorageDriver extends CloudServiceDriverBase<LocalStorageConfi | |||||
return file.exists(); | return file.exists(); | ||||
} | } | ||||
// Special handling when bubble has not been activated for bootstrapping ansible roles | |||||
if (activated() || !key.startsWith(ROLE_PATH)) return false; | |||||
// check root network filesystem | // check root network filesystem | ||||
final File file = keyFileForNetwork(ROOT_NETWORK_UUID, getBaseDir(), key); | final File file = keyFileForNetwork(ROOT_NETWORK_UUID, getBaseDir(), key); | ||||
if (file.exists()) return true; | if (file.exists()) return true; | ||||
@@ -113,9 +109,6 @@ public class LocalStorageDriver extends CloudServiceDriverBase<LocalStorageConfi | |||||
return f.exists() ? new FileInputStream(f) : null; | return f.exists() ? new FileInputStream(f) : null; | ||||
} | } | ||||
// Special handling when bubble is not activated for bootstrapping ansible roles | |||||
if (activated() || !key.startsWith(ROLE_PATH)) return null; | |||||
// check root network filesystem | // check root network filesystem | ||||
final File rootNetFile = keyFileForNetwork(ROOT_NETWORK_UUID, getBaseDir(), key); | final File rootNetFile = keyFileForNetwork(ROOT_NETWORK_UUID, getBaseDir(), key); | ||||
if (rootNetFile.exists()) return new FileInputStream(rootNetFile); | if (rootNetFile.exists()) return new FileInputStream(rootNetFile); | ||||
@@ -10,7 +10,6 @@ import bubble.dao.account.message.AccountMessageDAO; | |||||
import bubble.dao.app.*; | import bubble.dao.app.*; | ||||
import bubble.dao.bill.AccountPaymentArchivedDAO; | import bubble.dao.bill.AccountPaymentArchivedDAO; | ||||
import bubble.dao.bill.BillDAO; | import bubble.dao.bill.BillDAO; | ||||
import bubble.dao.cloud.AnsibleRoleDAO; | |||||
import bubble.dao.cloud.BubbleDomainDAO; | import bubble.dao.cloud.BubbleDomainDAO; | ||||
import bubble.dao.cloud.BubbleFootprintDAO; | import bubble.dao.cloud.BubbleFootprintDAO; | ||||
import bubble.dao.cloud.CloudServiceDAO; | import bubble.dao.cloud.CloudServiceDAO; | ||||
@@ -62,7 +61,6 @@ public class AccountDAO extends AbstractCRUDDAO<Account> implements SqlViewSearc | |||||
@Autowired private AppDataDAO dataDAO; | @Autowired private AppDataDAO dataDAO; | ||||
@Autowired private AppMessageDAO appMessageDAO; | @Autowired private AppMessageDAO appMessageDAO; | ||||
@Autowired private RuleDriverDAO driverDAO; | @Autowired private RuleDriverDAO driverDAO; | ||||
@Autowired private AnsibleRoleDAO roleDAO; | |||||
@Autowired private BubbleDomainDAO domainDAO; | @Autowired private BubbleDomainDAO domainDAO; | ||||
@Autowired private CloudServiceDAO cloudDAO; | @Autowired private CloudServiceDAO cloudDAO; | ||||
@Autowired private BubbleFootprintDAO footprintDAO; | @Autowired private BubbleFootprintDAO footprintDAO; | ||||
@@ -231,7 +229,6 @@ public class AccountDAO extends AbstractCRUDDAO<Account> implements SqlViewSearc | |||||
ready.set(true); | ready.set(true); | ||||
cloudDAO.ensureNoopCloudsExist(account); | cloudDAO.ensureNoopCloudsExist(account); | ||||
copyTemplateObjects(acct, parent, roleDAO); | |||||
final Map<String, RuleDriver> drivers = new HashMap<>(); | final Map<String, RuleDriver> drivers = new HashMap<>(); | ||||
copyTemplateObjects(acct, parent, driverDAO, new AccountTemplate.CopyTemplate<>() { | copyTemplateObjects(acct, parent, driverDAO, new AccountTemplate.CopyTemplate<>() { | ||||
@@ -326,7 +323,7 @@ public class AccountDAO extends AbstractCRUDDAO<Account> implements SqlViewSearc | |||||
} | } | ||||
@Transactional(Transactional.TxType.REQUIRES_NEW) | @Transactional(Transactional.TxType.REQUIRES_NEW) | ||||
private void deleteTransactional(@NonNull final String uuid) { | |||||
protected void deleteTransactional(@NonNull final String uuid) { | |||||
// loading, and actually checking if the account with given UUID exists | // loading, and actually checking if the account with given UUID exists | ||||
final var account = findByUuid(uuid); | final var account = findByUuid(uuid); | ||||
@@ -1,135 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.dao.cloud; | |||||
import bubble.cloud.CloudServiceType; | |||||
import bubble.dao.account.AccountOwnedTemplateDAO; | |||||
import bubble.model.account.Account; | |||||
import bubble.model.cloud.AnsibleRole; | |||||
import bubble.model.cloud.CloudService; | |||||
import bubble.server.BubbleConfiguration; | |||||
import bubble.service.cloud.StorageService; | |||||
import lombok.Cleanup; | |||||
import lombok.extern.slf4j.Slf4j; | |||||
import org.cobbzilla.util.io.FileUtil; | |||||
import org.cobbzilla.util.io.Tarball; | |||||
import org.cobbzilla.util.io.TempDir; | |||||
import org.cobbzilla.util.string.Base64; | |||||
import org.hibernate.criterion.Order; | |||||
import org.springframework.beans.factory.annotation.Autowired; | |||||
import org.springframework.stereotype.Repository; | |||||
import java.io.*; | |||||
import java.util.List; | |||||
import static bubble.cloud.storage.StorageServiceDriver.STORAGE_PREFIX; | |||||
import static bubble.cloud.storage.local.LocalStorageDriver.LOCAL_STORAGE; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.die; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.shortError; | |||||
import static org.cobbzilla.util.string.Base64.DONT_GUNZIP; | |||||
import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | |||||
@Repository @Slf4j | |||||
public class AnsibleRoleDAO extends AccountOwnedTemplateDAO<AnsibleRole> { | |||||
public static final String ROLE_PATH = "automation/roles/"; | |||||
@Autowired private CloudServiceDAO cloudDAO; | |||||
@Autowired private StorageService storageService; | |||||
@Autowired private BubbleConfiguration configuration; | |||||
@Override public Order getDefaultSortOrder() { return PRIORITY_ASC; } | |||||
@Override public Object preCreate(AnsibleRole role) { | |||||
// convert [[ .. ]] to {{ }} | |||||
if (role.hasConfig()) role.setConfigJson(role.getConfigJson().replace("[[", "{{").replace("]]", "}}")); | |||||
if (!role.hasTgzB64()) throw invalidEx("err.tgzB64.required"); | |||||
// Is it a raw tgz? | |||||
if (role.isTgzB64raw()) { | |||||
// Ensure it is a valid tarball | |||||
try { | |||||
@Cleanup("delete") final TempDir temp = new TempDir(); | |||||
final File tarballFile = new File(temp, role.getName() + ".tgz"); | |||||
FileUtil.toFile(tarballFile, new ByteArrayInputStream(Base64.decode(role.getTgzB64(), DONT_GUNZIP))); | |||||
Tarball.unroll(tarballFile, temp); | |||||
final File namedRoleDir = new File(temp, role.getRoleName()); | |||||
if (!namedRoleDir.exists() || !namedRoleDir.isDirectory()) throw invalidEx("err.tgzB64.invalid.noRolesDir", "no roles/"+ role.getName()+" dir"); | |||||
// the role dir and the tarball are the only 2 files here | |||||
final String[] files = temp.list(); | |||||
if (files == null || files.length != 2) { | |||||
throw invalidEx("err.tgzB64.invalid.wrongNumberOfFiles", "multiple entries in tarball base directory"); | |||||
} | |||||
final File mainTask = new File(new File(namedRoleDir, "tasks"), "main.yml"); | |||||
if (!mainTask.exists() || !mainTask.isFile()) throw invalidEx("err.tgzB64.invalid.missingTasksMainYml", "no roles/"+ role.getName()+"/tasks/main.yml file"); | |||||
final String key = ROLE_PATH + tarballFile.getName(); | |||||
final List<CloudService> clouds = cloudDAO.findByAccountAndType(role.getAccount(), CloudServiceType.storage); | |||||
String stored = null; | |||||
for (CloudService cloud : clouds) { | |||||
try { | |||||
@Cleanup final FileInputStream in = new FileInputStream(tarballFile); | |||||
cloud.getStorageDriver(configuration).write(configuration.getThisNode().getUuid(), key, in); | |||||
stored = STORAGE_PREFIX+cloud.getName()+"/"+key; | |||||
break; | |||||
} catch (Exception e) { | |||||
log.warn("preCreate: error storing role archive to "+cloud.getName()+"/"+key+": "+e); | |||||
} | |||||
} | |||||
if (stored == null) { | |||||
return die("preCreate: failed to store role archive to any storage service"); | |||||
} | |||||
role.setTgzB64(stored); | |||||
} catch (Exception e) { | |||||
throw invalidEx("err.tgzB64.invalid.writingToStorage", "error validating tarball/writing to storage: "+e); | |||||
} | |||||
} else if (role.isTgzB64storage()) { | |||||
// Verify file exists in storage | |||||
try { | |||||
if (!storageService.exists(role.getAccount(), role.getTgzB64())) { | |||||
throw new IllegalStateException("preCreate: role archive not found in storage: "+role.getTgzB64()); | |||||
} | |||||
} catch (Exception e) { | |||||
boolean existsOnClasspath = false; | |||||
final String prefix = STORAGE_PREFIX + LOCAL_STORAGE + "/"; | |||||
final String roleTgzPath; | |||||
if (role.getTgzB64().startsWith(prefix + "automation/roles/")) { | |||||
// check classpath | |||||
roleTgzPath = role.getTgzB64().substring(prefix.length()); | |||||
try { | |||||
@Cleanup final InputStream in = getClass().getClassLoader().getResourceAsStream(roleTgzPath); | |||||
existsOnClasspath = in != null; | |||||
if (existsOnClasspath) { | |||||
if (!storageService.write(role.getAccount(), role.getTgzB64(), in)) { | |||||
log.warn("preCreate: error writing role archive from classpath:"+roleTgzPath+" -> storage:"+role.getTgzB64()); | |||||
} | |||||
} | |||||
} catch (Exception ioe) { | |||||
log.warn("preCreate: role archive not found in storage ("+role.getTgzB64()+") and exception searching classpath ("+roleTgzPath+"): "+shortError(ioe)); | |||||
} | |||||
} else { | |||||
roleTgzPath = null; | |||||
} | |||||
if (!existsOnClasspath) { | |||||
throw invalidEx("err.tgzB64.invalid.readingFromStorage", "error reading from " + roleTgzPath + " : " + e); | |||||
} | |||||
} | |||||
} | |||||
return super.preCreate(role); | |||||
} | |||||
public List<AnsibleRole> findByAccountAndNames(Account account, String[] roles) { | |||||
return findByFieldAndFieldIn("account", account.getUuid(), "name", roles, getDefaultSortOrder()); | |||||
} | |||||
} |
@@ -4,9 +4,7 @@ | |||||
*/ | */ | ||||
package bubble.dao.cloud; | package bubble.dao.cloud; | ||||
import bubble.dao.account.AccountDAO; | |||||
import bubble.dao.account.AccountOwnedTemplateDAO; | import bubble.dao.account.AccountOwnedTemplateDAO; | ||||
import bubble.model.account.Account; | |||||
import bubble.model.cloud.BubbleDomain; | import bubble.model.cloud.BubbleDomain; | ||||
import bubble.model.cloud.CloudService; | import bubble.model.cloud.CloudService; | ||||
import bubble.server.BubbleConfiguration; | import bubble.server.BubbleConfiguration; | ||||
@@ -15,7 +13,6 @@ import org.hibernate.criterion.Order; | |||||
import org.springframework.beans.factory.annotation.Autowired; | import org.springframework.beans.factory.annotation.Autowired; | ||||
import org.springframework.stereotype.Repository; | import org.springframework.stereotype.Repository; | ||||
import java.util.ArrayList; | |||||
import java.util.List; | import java.util.List; | ||||
import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | ||||
@@ -23,37 +20,16 @@ import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | |||||
@Repository @Slf4j | @Repository @Slf4j | ||||
public class BubbleDomainDAO extends AccountOwnedTemplateDAO<BubbleDomain> { | public class BubbleDomainDAO extends AccountOwnedTemplateDAO<BubbleDomain> { | ||||
@Autowired private AnsibleRoleDAO roleDAO; | |||||
@Autowired private CloudServiceDAO cloudDAO; | @Autowired private CloudServiceDAO cloudDAO; | ||||
@Autowired private AccountDAO accountDAO; | |||||
@Autowired private BubbleConfiguration configuration; | @Autowired private BubbleConfiguration configuration; | ||||
@Override public Order getDefaultSortOrder() { return PRIORITY_ASC; } | @Override public Order getDefaultSortOrder() { return PRIORITY_ASC; } | ||||
@Override public Object preCreate(BubbleDomain domain) { | @Override public Object preCreate(BubbleDomain domain) { | ||||
final String[] roles = domain.getRoles(); | |||||
if (roles == null || roles.length == 0) throw invalidEx("err.roles.required"); | |||||
final CloudService dnsService = cloudDAO.findByUuid(domain.getPublicDns()); | final CloudService dnsService = cloudDAO.findByUuid(domain.getPublicDns()); | ||||
if (dnsService == null) throw invalidEx("err.dns.notFound", "cloud service not found: "+domain.getPublicDns(), domain.getPublicDns()); | if (dnsService == null) throw invalidEx("err.dns.notFound", "cloud service not found: "+domain.getPublicDns(), domain.getPublicDns()); | ||||
final Account account = accountDAO.findByUuid(domain.getAccount()); | |||||
final List<String> validRoles = new ArrayList<>(); | |||||
for (String r : roles) { | |||||
if (roleDAO.findByAccountAndId(domain.getAccount(), r) == null) { | |||||
if (!account.hasParent()) { | |||||
validRoles.add(r); | |||||
} else if (roleDAO.findPublicTemplate(account.getParent(), r) == null) { | |||||
continue; | |||||
} | |||||
validRoles.add(r); | |||||
} else { | |||||
validRoles.add(r); | |||||
} | |||||
} | |||||
domain.setRoles(validRoles.toArray(new String[0])); | |||||
return super.preCreate(domain); | return super.preCreate(domain); | ||||
} | } | ||||
@@ -66,7 +66,6 @@ import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | |||||
@ECTypeChild(type=RuleDriver.class, backref="account"), | @ECTypeChild(type=RuleDriver.class, backref="account"), | ||||
@ECTypeChild(type=BubbleApp.class, backref="account"), | @ECTypeChild(type=BubbleApp.class, backref="account"), | ||||
@ECTypeChild(type=AppData.class, backref="account"), | @ECTypeChild(type=AppData.class, backref="account"), | ||||
@ECTypeChild(type=AnsibleRole.class, backref="account"), | |||||
@ECTypeChild(type=CloudService.class, backref="account"), | @ECTypeChild(type=CloudService.class, backref="account"), | ||||
@ECTypeChild(type=BubbleFootprint.class, backref="account"), | @ECTypeChild(type=BubbleFootprint.class, backref="account"), | ||||
@ECTypeChild(type=BubbleDomain.class, backref="account"), | @ECTypeChild(type=BubbleDomain.class, backref="account"), | ||||
@@ -5,7 +5,6 @@ | |||||
package bubble.model.boot; | package bubble.model.boot; | ||||
import bubble.model.account.AccountSshKey; | import bubble.model.account.AccountSshKey; | ||||
import bubble.model.cloud.AnsibleRole; | |||||
import bubble.model.cloud.BubbleDomain; | import bubble.model.cloud.BubbleDomain; | ||||
import bubble.model.cloud.CloudService; | import bubble.model.cloud.CloudService; | ||||
import lombok.Getter; | import lombok.Getter; | ||||
@@ -37,9 +36,6 @@ public class ActivationRequest { | |||||
@HasValue(message="err.networkName.required") | @HasValue(message="err.networkName.required") | ||||
@Getter @Setter private String networkName = "boot-network"; | @Getter @Setter private String networkName = "boot-network"; | ||||
@Getter @Setter private AnsibleRole[] roles; | |||||
public boolean hasRoles () { return !empty(roles); } | |||||
@Getter @Setter private Map<String, CloudServiceConfig> cloudConfigs = new LinkedHashMap<>(); | @Getter @Setter private Map<String, CloudServiceConfig> cloudConfigs = new LinkedHashMap<>(); | ||||
public boolean hasCloudConfigs () { return !empty(cloudConfigs); } | public boolean hasCloudConfigs () { return !empty(cloudConfigs); } | ||||
public ActivationRequest addCloudConfig(CloudService cloud) { | public ActivationRequest addCloudConfig(CloudService cloud) { | ||||
@@ -1,136 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.model.cloud; | |||||
import bubble.model.account.Account; | |||||
import bubble.model.account.AccountTemplate; | |||||
import com.fasterxml.jackson.annotation.JsonIgnore; | |||||
import lombok.Getter; | |||||
import lombok.NoArgsConstructor; | |||||
import lombok.Setter; | |||||
import lombok.experimental.Accessors; | |||||
import org.cobbzilla.util.collection.HasPriority; | |||||
import org.cobbzilla.util.collection.NameAndValue; | |||||
import org.cobbzilla.wizard.model.IdentifiableBase; | |||||
import org.cobbzilla.wizard.model.SemanticVersion; | |||||
import org.cobbzilla.wizard.model.entityconfig.annotations.*; | |||||
import org.cobbzilla.wizard.validation.HasValue; | |||||
import javax.persistence.*; | |||||
import javax.validation.constraints.Pattern; | |||||
import javax.validation.constraints.Size; | |||||
import java.util.Arrays; | |||||
import static bubble.ApiConstants.DB_JSON_MAPPER; | |||||
import static bubble.ApiConstants.EP_ROLES; | |||||
import static bubble.cloud.storage.StorageServiceDriver.STORAGE_PREFIX; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.empty; | |||||
import static org.cobbzilla.util.json.JsonUtil.json; | |||||
import static org.cobbzilla.util.reflect.ReflectionUtil.copy; | |||||
@ECType(root=true, name="role") | |||||
@ECTypeURIs(baseURI=EP_ROLES, listFields={"account", "name", "description"}) | |||||
@Entity @NoArgsConstructor @Accessors(chain=true) | |||||
@ECIndexes({ | |||||
@ECIndex(unique=true, of={"account", "name"}), | |||||
@ECIndex(of={"account", "template", "enabled"}), | |||||
@ECIndex(of={"template", "enabled"}) | |||||
}) | |||||
public class AnsibleRole extends IdentifiableBase implements AccountTemplate, HasPriority { | |||||
public static final String[] COPY_FIELDS = { | |||||
"name", "priority", "description", "template", "enabled", | |||||
"configJson", "optionalConfigNamesJson", "tgzB64" | |||||
}; | |||||
public static final String ROLENAME_PATTERN = "[_A-Za-z0-9]+-[\\d]+\\.[\\d]+\\.[\\d]+"; | |||||
public AnsibleRole(AnsibleRole role) { copy(this, role, COPY_FIELDS); } | |||||
@ECSearchable(filter=true) @ECField(index=10) | |||||
@HasValue(message="err.name.required") | |||||
@Pattern(regexp=ROLENAME_PATTERN, message="err.name.invalid") | |||||
@ECIndex @Column(nullable=false, updatable=false, length=200) | |||||
@Getter @Setter private String name; | |||||
@ECSearchable @ECField(index=20) | |||||
@ECForeignKey(entity=Account.class) | |||||
@Column(nullable=false, updatable=false, length=UUID_MAXLEN) | |||||
@Getter @Setter private String account; | |||||
@JsonIgnore @Transient @Getter(lazy=true) private final String roleName = getRoleName(name); | |||||
@JsonIgnore @Transient @Getter(lazy=true) private final SemanticVersion version = getRoleVersion(name); | |||||
public static String getRoleName(String name) { | |||||
final int lastHyphen = name.lastIndexOf('-'); | |||||
return lastHyphen == -1 ? name : name.substring(0, lastHyphen); | |||||
} | |||||
public static boolean sameRoleName(String r1, String r2) { return getRoleName(r1).equals(getRoleName(r2)); } | |||||
public static SemanticVersion getRoleVersion(String name) { | |||||
final int lastHyphen = name.lastIndexOf('-'); | |||||
return lastHyphen == -1 ? null : new SemanticVersion(name.substring(lastHyphen +1)); | |||||
} | |||||
public static String findRole(String[] roles, String role) { | |||||
if (roles == null) return null; | |||||
return Arrays.stream(roles) | |||||
.filter(r -> sameRoleName(role, r)) | |||||
.findAny() | |||||
.orElse(null); | |||||
} | |||||
@ECSearchable(filter=true) @ECField(index=30) | |||||
@Size(max=10000, message="err.description.length") | |||||
@Getter @Setter private String description; | |||||
@ECSearchable @ECField(index=40) | |||||
@Enumerated(EnumType.STRING) | |||||
@ECIndex @Column(nullable=false, length=20) | |||||
@Getter @Setter private AnsibleInstallType install = AnsibleInstallType.standard; | |||||
public boolean shouldInstall(AnsibleInstallType installType) { | |||||
return install.shouldInstall(installType); | |||||
} | |||||
@ECSearchable @ECField(index=50) | |||||
@ECIndex @Column(nullable=false) | |||||
@Getter @Setter private Integer priority; | |||||
@ECSearchable @ECField(index=60) | |||||
@ECIndex @Column(nullable=false) | |||||
@Getter @Setter private Boolean template = false; | |||||
@ECSearchable @ECField(index=70) | |||||
@ECIndex @Column(nullable=false) | |||||
@Getter @Setter private Boolean enabled = true; | |||||
@Column(updatable=false, length=10000) | |||||
@JsonIgnore @Getter @Setter private String configJson; | |||||
public boolean hasConfig () { return configJson != null; } | |||||
@Transient public NameAndValue[] getConfig () { return configJson == null ? null : json(configJson, NameAndValue[].class); } | |||||
public AnsibleRole setConfig(NameAndValue[] config) { return setConfigJson(config == null ? null : json(config, DB_JSON_MAPPER)); } | |||||
@Column(updatable=false, length=1000) @ECField(index=80) | |||||
@JsonIgnore @Getter @Setter private String optionalConfigNamesJson; | |||||
public boolean hasOptionalConfigNames () { return optionalConfigNamesJson != null; } | |||||
@Transient public String[] getOptionalConfigNames() { return optionalConfigNamesJson == null ? null : json(optionalConfigNamesJson, String[].class); } | |||||
public AnsibleRole setOptionalConfigNames(String[] names) { return setOptionalConfigNamesJson(name == null ? null : json(names, DB_JSON_MAPPER)); } | |||||
// The Base64-encoded .tgz archive for the role directory. all paths should start with roles/<role-name>/... | |||||
// Then after it is stored (in AnsibleRoleDAO.preCreate), this becomes storage://CloudServiceName/path | |||||
@Column(updatable=false, length=200) | |||||
@Getter @Setter private String tgzB64; | |||||
public boolean hasTgzB64 () { return !empty(tgzB64); } | |||||
@Transient @JsonIgnore public boolean isTgzB64raw() { return tgzB64 != null && !tgzB64.startsWith(STORAGE_PREFIX); } | |||||
@Transient @JsonIgnore public boolean isTgzB64storage() { return tgzB64 != null && tgzB64.startsWith(STORAGE_PREFIX); } | |||||
public boolean isOptionalConfigName(String cfgName) { | |||||
final String[] names = getOptionalConfigNames(); | |||||
if (names == null) return false; | |||||
return Arrays.asList(names).contains(cfgName); | |||||
} | |||||
} |
@@ -6,7 +6,6 @@ package bubble.model.cloud; | |||||
import bubble.model.account.Account; | import bubble.model.account.Account; | ||||
import bubble.model.account.AccountTemplate; | import bubble.model.account.AccountTemplate; | ||||
import com.fasterxml.jackson.annotation.JsonIgnore; | |||||
import lombok.Getter; | import lombok.Getter; | ||||
import lombok.NoArgsConstructor; | import lombok.NoArgsConstructor; | ||||
import lombok.Setter; | import lombok.Setter; | ||||
@@ -24,20 +23,12 @@ import org.hibernate.annotations.Type; | |||||
import javax.persistence.Column; | import javax.persistence.Column; | ||||
import javax.persistence.Entity; | import javax.persistence.Entity; | ||||
import javax.persistence.Transient; | |||||
import javax.validation.constraints.Size; | import javax.validation.constraints.Size; | ||||
import java.util.Arrays; | |||||
import java.util.List; | |||||
import java.util.stream.Collectors; | |||||
import static bubble.ApiConstants.DB_JSON_MAPPER; | |||||
import static bubble.ApiConstants.EP_DOMAINS; | import static bubble.ApiConstants.EP_DOMAINS; | ||||
import static bubble.model.cloud.AnsibleRole.sameRoleName; | |||||
import static org.apache.commons.lang3.StringUtils.countMatches; | import static org.apache.commons.lang3.StringUtils.countMatches; | ||||
import static org.cobbzilla.util.daemon.ZillaRuntime.*; | |||||
import static org.cobbzilla.util.dns.DnsType.NS; | import static org.cobbzilla.util.dns.DnsType.NS; | ||||
import static org.cobbzilla.util.dns.DnsType.SOA; | import static org.cobbzilla.util.dns.DnsType.SOA; | ||||
import static org.cobbzilla.util.json.JsonUtil.json; | |||||
import static org.cobbzilla.util.reflect.ReflectionUtil.copy; | import static org.cobbzilla.util.reflect.ReflectionUtil.copy; | ||||
import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENCRYPTED_STRING; | import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENCRYPTED_STRING; | ||||
import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENC_PAD; | import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENC_PAD; | ||||
@@ -57,8 +48,7 @@ import static org.cobbzilla.wizard.model.crypto.EncryptedTypes.ENC_PAD; | |||||
public class BubbleDomain extends IdentifiableBase implements AccountTemplate, HasPriority { | public class BubbleDomain extends IdentifiableBase implements AccountTemplate, HasPriority { | ||||
public static final String[] UPDATE_FIELDS = {"description", "template", "enabled", "priority", "publicDns"}; | public static final String[] UPDATE_FIELDS = {"description", "template", "enabled", "priority", "publicDns"}; | ||||
public static final String[] CREATE_FIELDS = ArrayUtil.append(UPDATE_FIELDS, | |||||
"name", "rolesJson"); | |||||
public static final String[] CREATE_FIELDS = ArrayUtil.append(UPDATE_FIELDS, "name"); | |||||
public static final int DOMAIN_NAME_MAXLEN = 200; | public static final int DOMAIN_NAME_MAXLEN = 200; | ||||
@@ -108,49 +98,6 @@ public class BubbleDomain extends IdentifiableBase implements AccountTemplate, H | |||||
@Column(nullable=false, length=UUID_MAXLEN) | @Column(nullable=false, length=UUID_MAXLEN) | ||||
@Getter @Setter private String publicDns; | @Getter @Setter private String publicDns; | ||||
@Type(type=ENCRYPTED_STRING) @Column(columnDefinition="varchar("+(10000+ENC_PAD)+") NOT NULL") | |||||
@JsonIgnore @Getter @Setter private String rolesJson; | |||||
public boolean hasRoles () { return !empty(getRoles()); } | |||||
@Transient public String[] getRoles () { return rolesJson == null ? null : json(rolesJson, String[].class); } | |||||
public BubbleDomain setRoles (String[] roles) { return setRolesJson(roles == null ? null : json(roles, DB_JSON_MAPPER)); } | |||||
public String findRole(String r) { return AnsibleRole.findRole(getRoles(), r); } | |||||
public BubbleDomain addRole(String r) { | |||||
final String[] roles = getRoles(); | |||||
if (roles != null) { | |||||
if (Arrays.stream(roles).noneMatch(role -> sameRoleName(role, r))) { | |||||
return setRoles(ArrayUtil.append(roles, r)); | |||||
} else { | |||||
log.warn("addRole("+r+"): role already exists in domain "+getName()); | |||||
return this; | |||||
} | |||||
} else { | |||||
return setRoles(new String[]{r}); | |||||
} | |||||
} | |||||
public BubbleDomain removeRole(String r) { | |||||
final String[] roles = getRoles(); | |||||
if (roles == null) return null; | |||||
final List<String> newRoles = Arrays.stream(roles).filter(role -> !sameRoleName(role, r)).collect(Collectors.toList()); | |||||
return setRolesJson(json(newRoles)); | |||||
} | |||||
public BubbleDomain updateRole(String previous, String current) { | |||||
final String[] roles = getRoles(); | |||||
if (roles == null) return die("updateRole: no roles!"); | |||||
final String role = findRole(previous); | |||||
if (role != null) { | |||||
log.debug("updateRole: removing previous role: "+role); | |||||
removeRole(role); | |||||
} | |||||
log.debug("updateRole: adding new role: "+current); | |||||
return addRole(current); | |||||
} | |||||
public String networkFromFqdn(String fqdn, ValidationResult errors) { | public String networkFromFqdn(String fqdn, ValidationResult errors) { | ||||
if (!fqdn.endsWith("."+getName())) { | if (!fqdn.endsWith("."+getName())) { | ||||
errors.addViolation("err.fqdn.outOfNetwork"); | errors.addViolation("err.fqdn.outOfNetwork"); | ||||
@@ -518,13 +518,6 @@ public class AccountsResource { | |||||
return ok(BubbleDeviceType.getSelectableTypes()); | return ok(BubbleDeviceType.getSelectableTypes()); | ||||
} | } | ||||
@Path("/{id}"+EP_ROLES) | |||||
public AnsibleRolesResource getAnsibleRoles(@Context ContainerRequest ctx, | |||||
@PathParam("id") String id) { | |||||
final AccountContext c = new AccountContext(ctx, id); | |||||
return configuration.subResource(AnsibleRolesResource.class, c.account); | |||||
} | |||||
@Path("/{id}"+EP_VPN) | @Path("/{id}"+EP_VPN) | ||||
public VpnConfigResource getVpnConfig(@Context ContainerRequest ctx, | public VpnConfigResource getVpnConfig(@Context ContainerRequest ctx, | ||||
@PathParam("id") String id) { | @PathParam("id") String id) { | ||||
@@ -278,12 +278,6 @@ public class MeResource { | |||||
return configuration.subResource(NetworksResource.class, caller); | return configuration.subResource(NetworksResource.class, caller); | ||||
} | } | ||||
@Path(EP_ROLES) | |||||
public AnsibleRolesResource getAnsibleRoles(@Context ContainerRequest ctx) { | |||||
final Account caller = userPrincipal(ctx); | |||||
return configuration.subResource(AnsibleRolesResource.class, caller); | |||||
} | |||||
@Path(EP_SENT_NOTIFICATIONS) | @Path(EP_SENT_NOTIFICATIONS) | ||||
public SentNotificationsResource getSentNotificationsResource(@Context ContainerRequest ctx) { | public SentNotificationsResource getSentNotificationsResource(@Context ContainerRequest ctx) { | ||||
final Account caller = userPrincipal(ctx); | final Account caller = userPrincipal(ctx); | ||||
@@ -1,13 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.resources.cloud; | |||||
import bubble.model.account.Account; | |||||
public class AnsibleRolesResource extends AnsibleRolesResourceBase { | |||||
public AnsibleRolesResource(Account account) { super(account); } | |||||
} |
@@ -1,32 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.resources.cloud; | |||||
import bubble.dao.cloud.AnsibleRoleDAO; | |||||
import bubble.model.account.Account; | |||||
import bubble.model.cloud.AnsibleRole; | |||||
import bubble.resources.account.AccountOwnedTemplateResource; | |||||
import org.glassfish.grizzly.http.server.Request; | |||||
import org.glassfish.jersey.server.ContainerRequest; | |||||
import static org.cobbzilla.wizard.resources.ResourceUtil.invalidEx; | |||||
public class AnsibleRolesResourceBase extends AccountOwnedTemplateResource<AnsibleRole, AnsibleRoleDAO> { | |||||
public AnsibleRolesResourceBase(Account account) { super(account); } | |||||
@Override protected boolean canCreate(Request req, ContainerRequest ctx, Account caller, AnsibleRole request) { | |||||
// ensure a role with the same name/version does not exist for this account | |||||
final AnsibleRole existing = getDao().findByAccountAndId(caller.getUuid(), request.getName()); | |||||
if (existing != null) throw invalidEx("err.role.exists", "A role exists with name: "+request.getName()); | |||||
return super.canCreate(req, ctx, caller, request); | |||||
} | |||||
@Override protected boolean canUpdate(ContainerRequest ctx, Account caller, AnsibleRole found, AnsibleRole request) { | |||||
// roles cannot be updated | |||||
return false; | |||||
} | |||||
} |
@@ -1,163 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.resources.cloud; | |||||
import bubble.dao.cloud.AnsibleRoleDAO; | |||||
import bubble.dao.cloud.BubbleDomainDAO; | |||||
import bubble.model.account.Account; | |||||
import bubble.model.cloud.AnsibleRole; | |||||
import bubble.model.cloud.BubbleDomain; | |||||
import org.cobbzilla.wizard.model.SemanticVersion; | |||||
import org.glassfish.jersey.server.ContainerRequest; | |||||
import org.springframework.beans.factory.annotation.Autowired; | |||||
import javax.ws.rs.*; | |||||
import javax.ws.rs.core.Context; | |||||
import javax.ws.rs.core.Response; | |||||
import static bubble.model.cloud.AnsibleRole.*; | |||||
import static bubble.resources.account.AccountOwnedResource.validateAccountUuid; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.die; | |||||
import static org.cobbzilla.util.http.HttpContentTypes.APPLICATION_JSON; | |||||
import static org.cobbzilla.wizard.resources.ResourceUtil.*; | |||||
@Consumes(APPLICATION_JSON) | |||||
@Produces(APPLICATION_JSON) | |||||
public class DomainRolesResource { | |||||
public Account account; | |||||
public BubbleDomain domain; | |||||
@Autowired private BubbleDomainDAO domainDAO; | |||||
@Autowired private AnsibleRoleDAO roleDAO; | |||||
public DomainRolesResource(Account account, BubbleDomain domain) { | |||||
this.account = account; | |||||
this.domain = domain; | |||||
} | |||||
@GET | |||||
public Response list(@Context ContainerRequest ctx) { | |||||
final DomainRoleContext drc = new DomainRoleContext(ctx); | |||||
return ok(drc.getDomain().getRoles()); | |||||
} | |||||
@GET @Path("/{role}") | |||||
public Response find(@Context ContainerRequest ctx, | |||||
@PathParam("role") String role) { | |||||
final DomainRoleContext drc = new DomainRoleContext(ctx, role); | |||||
if (drc.domainRole == null) return notFound(role); | |||||
return ok(drc.domainRole); | |||||
} | |||||
@PUT @Path("/{role}") | |||||
public Response add(@Context ContainerRequest ctx, | |||||
@PathParam("role") String role) { | |||||
final DomainRoleContext drc = new DomainRoleContext(ctx, role); | |||||
// does the domain already have a role with the same name? | |||||
if (drc.domainRole != null) { | |||||
final SemanticVersion version = getRoleVersion(role); | |||||
final SemanticVersion existingVersion = getRoleVersion(drc.domainRole); | |||||
if (existingVersion == null) return die("add: role defined without version in domain: "+drc.getDomain().getUuid()); | |||||
if (existingVersion.equals(version)) { | |||||
// same version, nothing to do | |||||
return ok(drc.domainRoles); | |||||
} else { | |||||
// different version, cannot add it, it's already there | |||||
return invalid("err.domainRole.alreadyExists", "Cannot add role "+role+" to domain "+domain.getName()+", already includes role: "+drc.domainRole); | |||||
} | |||||
} | |||||
// role is not in domain's role list. add it, commit the domain | |||||
final BubbleDomain updated = domainDAO.update(drc.getDomain().addRole(drc.role.getName())); | |||||
return ok(updated.getRoles()); | |||||
} | |||||
@POST @Path("/{role}") | |||||
public Response update(@Context ContainerRequest ctx, | |||||
@PathParam("role") String roleName, | |||||
AnsibleRole role) { | |||||
// if roleName path param has a version, use it. otherwise use the version found in the role entity | |||||
SemanticVersion version = getRoleVersion(roleName); | |||||
if (version != null) { | |||||
// ensure it matches what is in the entity | |||||
if (!role.getVersion().equals(version)) return versionMismatch(roleName, role); | |||||
} else { | |||||
version = role.getVersion(); | |||||
roleName = getRoleName(roleName)+"-"+version; | |||||
} | |||||
// roleName must match json | |||||
if (!sameRoleName(roleName, role.getName())) return invalid("err.role.invalid", "role name mismatch", roleName); | |||||
final DomainRoleContext drc = new DomainRoleContext(ctx, roleName, true); | |||||
if (drc.domainRole == null) { | |||||
log.warn("update: role not found, adding: "+roleName); | |||||
} else { | |||||
// if version is the same, no changes to make | |||||
if (version.equals(getRoleVersion(drc.domainRole))) { | |||||
log.info("update: same version, not updating"); | |||||
return ok(drc.domainRoles); | |||||
} | |||||
} | |||||
final BubbleDomain updated = domainDAO.update(drc.getDomain().updateRole(roleName, role.getName())); | |||||
log.debug("update: updated.roles="+updated.getRolesJson()); | |||||
return ok(updated.getRoles()); | |||||
} | |||||
public Response versionMismatch(@PathParam("role") String roleName, AnsibleRole role) { | |||||
return invalid("err.version.mismatch", "version in URL ("+roleName+") did not match version in object ("+role.getVersion()+")", roleName); | |||||
} | |||||
@DELETE @Path("/{role}") | |||||
public Response remove(@Context ContainerRequest ctx, | |||||
@PathParam("role") String role) { | |||||
final DomainRoleContext drc = new DomainRoleContext(ctx, role); | |||||
if (drc.domainRole == null) return notFound(role); | |||||
final BubbleDomain updated = domainDAO.update(drc.getDomain().removeRole(role)); | |||||
return ok(updated.getRoles()); | |||||
} | |||||
private class DomainRoleContext { | |||||
public Account caller; | |||||
private BubbleDomain d; | |||||
public BubbleDomain getDomain () { return d; } | |||||
public AnsibleRole role; | |||||
public String[] domainRoles; | |||||
public String domainRole; | |||||
public DomainRoleContext(ContainerRequest ctx) { this(ctx, null); } | |||||
public DomainRoleContext(ContainerRequest ctx, String roleName) { | |||||
this(ctx, roleName, false); | |||||
} | |||||
public DomainRoleContext(ContainerRequest ctx, String roleName, boolean okNotFound) { | |||||
caller = userPrincipal(ctx); | |||||
final String accountUuid = validateAccountUuid(account, ctx, caller); | |||||
d = domainDAO.findByUuid(domain.getUuid()); | |||||
if (d == null) throw notFoundEx(domain.getName()); | |||||
if (roleName != null) { | |||||
role = roleDAO.findByAccountAndId(accountUuid, roleName); | |||||
if (role == null) throw notFoundEx(roleName); | |||||
domainRoles = d.getRoles(); | |||||
domainRole = findRole(domainRoles, roleName); | |||||
// user was requesting exact version -- ensure the domainRole version matches | |||||
if (!okNotFound && roleName.contains("-") && !roleName.equals(domainRole)) throw notFoundEx(roleName); | |||||
} | |||||
} | |||||
} | |||||
} |
@@ -54,13 +54,4 @@ public class DomainsResourceBase extends AccountOwnedTemplateResource<BubbleDoma | |||||
return configuration.subResource(NodesResource.class, caller, domain); | return configuration.subResource(NodesResource.class, caller, domain); | ||||
} | } | ||||
@Path("/{id}"+EP_ROLES) | |||||
public DomainRolesResource getRoles(@Context ContainerRequest ctx, | |||||
@PathParam("id") String id) { | |||||
final Account caller = userPrincipal(ctx); | |||||
final BubbleDomain domain = find(ctx, id); | |||||
if (domain == null) throw notFoundEx(id); | |||||
return configuration.subResource(DomainRolesResource.class, caller, domain); | |||||
} | |||||
} | } |
@@ -1,20 +0,0 @@ | |||||
/** | |||||
* Copyright (c) 2020 Bubble, Inc. All rights reserved. | |||||
* For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
*/ | |||||
package bubble.resources.cloud; | |||||
import lombok.extern.slf4j.Slf4j; | |||||
import org.springframework.stereotype.Service; | |||||
import javax.ws.rs.Path; | |||||
import static bubble.ApiConstants.ROLES_ENDPOINT; | |||||
@Path(ROLES_ENDPOINT) | |||||
@Service @Slf4j | |||||
public class PublicAnsibleRolesResource extends AnsibleRolesResourceBase { | |||||
public PublicAnsibleRolesResource() { super(null); } | |||||
} |
@@ -60,7 +60,6 @@ public class ActivationService { | |||||
public static final long ACTIVATION_TIMEOUT = SECONDS.toMillis(10); | public static final long ACTIVATION_TIMEOUT = SECONDS.toMillis(10); | ||||
@Autowired private AccountSshKeyDAO sshKeyDAO; | @Autowired private AccountSshKeyDAO sshKeyDAO; | ||||
@Autowired private AnsibleRoleDAO roleDAO; | |||||
@Autowired private CloudServiceDAO cloudDAO; | @Autowired private CloudServiceDAO cloudDAO; | ||||
@Autowired private BubbleDomainDAO domainDAO; | @Autowired private BubbleDomainDAO domainDAO; | ||||
@Autowired private BubbleNetworkDAO networkDAO; | @Autowired private BubbleNetworkDAO networkDAO; | ||||
@@ -158,15 +157,9 @@ public class ActivationService { | |||||
} | } | ||||
if (errors.isInvalid()) throw invalidEx(errors); | if (errors.isInvalid()) throw invalidEx(errors); | ||||
final AnsibleRole[] roles = request.hasRoles() ? request.getRoles() : json(loadDefaultRoles(), AnsibleRole[].class); | |||||
for (AnsibleRole role : roles) { | |||||
roleDAO.create(role.setAccount(account.getUuid())); | |||||
} | |||||
final BubbleDomain domain = domainDAO.create(new BubbleDomain(request.getDomain()) | final BubbleDomain domain = domainDAO.create(new BubbleDomain(request.getDomain()) | ||||
.setAccount(account.getUuid()) | .setAccount(account.getUuid()) | ||||
.setPublicDns(publicDns.getUuid()) | .setPublicDns(publicDns.getUuid()) | ||||
.setRoles(Arrays.stream(roles).map(AnsibleRole::getName).toArray(String[]::new)) | |||||
.setTemplate(true)); | .setTemplate(true)); | ||||
BubbleFootprint footprint = footprintDAO.findByAccountAndId(account.getUuid(), DEFAULT_FOOTPRINT); | BubbleFootprint footprint = footprintDAO.findByAccountAndId(account.getUuid(), DEFAULT_FOOTPRINT); | ||||
@@ -216,13 +209,6 @@ public class ActivationService { | |||||
final BubbleNodeKey key = nodeKeyDAO.create(new BubbleNodeKey(node)); | final BubbleNodeKey key = nodeKeyDAO.create(new BubbleNodeKey(node)); | ||||
selfNodeService.setActivated(node); | selfNodeService.setActivated(node); | ||||
String[] domainRoles = request.getDomain().getRoles(); | |||||
if (domainRoles == null || domainRoles.length == 0) { | |||||
domainRoles = Arrays.stream(roles).map(AnsibleRole::getName).toArray(String[]::new); | |||||
} | |||||
domainDAO.update(domain.setRoles(domainRoles)); | |||||
selfNodeService.initThisNode(node); | selfNodeService.initThisNode(node); | ||||
configuration.refreshPublicSystemConfigs(); | configuration.refreshPublicSystemConfigs(); | ||||
@@ -4,6 +4,8 @@ | |||||
*/ | */ | ||||
package bubble.service.cloud; | package bubble.service.cloud; | ||||
import bubble.ApiConstants; | |||||
import bubble.cloud.compute.AnsibleRole; | |||||
import bubble.cloud.compute.ComputeServiceDriver; | import bubble.cloud.compute.ComputeServiceDriver; | ||||
import bubble.dao.bill.AccountPlanDAO; | import bubble.dao.bill.AccountPlanDAO; | ||||
import bubble.dao.bill.BubblePlanAppDAO; | import bubble.dao.bill.BubblePlanAppDAO; | ||||
@@ -11,42 +13,40 @@ import bubble.model.account.Account; | |||||
import bubble.model.bill.AccountPlan; | import bubble.model.bill.AccountPlan; | ||||
import bubble.model.bill.BubblePlanApp; | import bubble.model.bill.BubblePlanApp; | ||||
import bubble.model.cloud.AnsibleInstallType; | import bubble.model.cloud.AnsibleInstallType; | ||||
import bubble.model.cloud.AnsibleRole; | |||||
import bubble.model.cloud.BubbleNetwork; | import bubble.model.cloud.BubbleNetwork; | ||||
import bubble.model.cloud.BubbleNode; | import bubble.model.cloud.BubbleNode; | ||||
import bubble.server.BubbleConfiguration; | import bubble.server.BubbleConfiguration; | ||||
import bubble.service.dbfilter.DatabaseFilterService; | import bubble.service.dbfilter.DatabaseFilterService; | ||||
import com.github.jknack.handlebars.Handlebars; | import com.github.jknack.handlebars.Handlebars; | ||||
import lombok.Cleanup; | |||||
import lombok.extern.slf4j.Slf4j; | import lombok.extern.slf4j.Slf4j; | ||||
import org.apache.commons.compress.archivers.ArchiveException; | |||||
import org.cobbzilla.util.collection.NameAndValue; | import org.cobbzilla.util.collection.NameAndValue; | ||||
import org.cobbzilla.util.handlebars.HandlebarsUtil; | import org.cobbzilla.util.handlebars.HandlebarsUtil; | ||||
import org.cobbzilla.util.io.Tarball; | |||||
import org.cobbzilla.util.io.FileUtil; | |||||
import org.cobbzilla.util.io.TempDir; | import org.cobbzilla.util.io.TempDir; | ||||
import org.cobbzilla.util.string.Base64; | |||||
import org.cobbzilla.wizard.server.config.ErrorApiConfiguration; | import org.cobbzilla.wizard.server.config.ErrorApiConfiguration; | ||||
import org.cobbzilla.wizard.validation.ValidationResult; | import org.cobbzilla.wizard.validation.ValidationResult; | ||||
import org.springframework.beans.factory.annotation.Autowired; | import org.springframework.beans.factory.annotation.Autowired; | ||||
import org.springframework.stereotype.Service; | import org.springframework.stereotype.Service; | ||||
import java.io.*; | |||||
import java.io.File; | |||||
import java.io.FileWriter; | |||||
import java.io.IOException; | |||||
import java.io.Writer; | |||||
import java.util.HashMap; | import java.util.HashMap; | ||||
import java.util.List; | import java.util.List; | ||||
import java.util.Map; | import java.util.Map; | ||||
import java.util.stream.Collectors; | |||||
import static bubble.service.backup.RestoreService.RESTORE_MONITOR_SCRIPT_TIMEOUT_SECONDS; | import static bubble.service.backup.RestoreService.RESTORE_MONITOR_SCRIPT_TIMEOUT_SECONDS; | ||||
import static org.cobbzilla.util.collection.HasPriority.SORT_PRIORITY; | |||||
import static org.cobbzilla.util.daemon.ZillaRuntime.die; | import static org.cobbzilla.util.daemon.ZillaRuntime.die; | ||||
import static org.cobbzilla.util.io.FileUtil.*; | |||||
import static org.cobbzilla.util.string.Base64.DONT_GUNZIP; | |||||
import static org.cobbzilla.util.io.FileUtil.abs; | |||||
import static org.cobbzilla.util.io.FileUtil.mkdirOrDie; | |||||
import static org.cobbzilla.util.io.StreamUtil.copyClasspathDirectory; | |||||
import static org.cobbzilla.util.json.JsonUtil.json; | |||||
@Service @Slf4j | @Service @Slf4j | ||||
public class AnsiblePrepService { | public class AnsiblePrepService { | ||||
@Autowired private DatabaseFilterService dbFilter; | @Autowired private DatabaseFilterService dbFilter; | ||||
@Autowired private StandardStorageService storageService; | |||||
@Autowired private BubbleConfiguration configuration; | @Autowired private BubbleConfiguration configuration; | ||||
@Autowired private AccountPlanDAO accountPlanDAO; | @Autowired private AccountPlanDAO accountPlanDAO; | ||||
@Autowired private BubblePlanAppDAO planAppDAO; | @Autowired private BubblePlanAppDAO planAppDAO; | ||||
@@ -57,20 +57,16 @@ public class AnsiblePrepService { | |||||
BubbleNetwork network, | BubbleNetwork network, | ||||
BubbleNode node, | BubbleNode node, | ||||
ComputeServiceDriver computeDriver, | ComputeServiceDriver computeDriver, | ||||
List<AnsibleRole> roles, | |||||
ValidationResult errors, | ValidationResult errors, | ||||
File tarballDir, | |||||
boolean fork, | boolean fork, | ||||
String restoreKey) throws IOException, ArchiveException { | |||||
String restoreKey) throws IOException { | |||||
final BubbleConfiguration c = configuration; | final BubbleConfiguration c = configuration; | ||||
if (tarballDir == null) tarballDir = automation; | |||||
final AnsibleInstallType installType = network.getInstallType(); | final AnsibleInstallType installType = network.getInstallType(); | ||||
roles.sort(SORT_PRIORITY); | |||||
final List<AnsibleRole> installRoles = roles.stream() | |||||
.filter(role -> role.shouldInstall(installType)) | |||||
.collect(Collectors.toList()); | |||||
final String[] installRoles = installType == AnsibleInstallType.sage | |||||
? ApiConstants.ROLES_SAGE | |||||
: ApiConstants.ROLES_NODE; | |||||
final Map<String, Object> ctx = new HashMap<>(); | final Map<String, Object> ctx = new HashMap<>(); | ||||
final Handlebars handlebars = c.getHandlebars(); | final Handlebars handlebars = c.getHandlebars(); | ||||
@@ -94,7 +90,7 @@ public class AnsiblePrepService { | |||||
ctx.put("network", network); | ctx.put("network", network); | ||||
ctx.put("node", node); | ctx.put("node", node); | ||||
ctx.put("roles", installRoles.stream().map(AnsibleRole::getRoleName).collect(Collectors.toList())); | |||||
ctx.put("roles", installRoles); | |||||
ctx.put("testMode", !fork && configuration.testMode()); | ctx.put("testMode", !fork && configuration.testMode()); | ||||
// Determine which apps should be copied based on plan | // Determine which apps should be copied based on plan | ||||
@@ -108,34 +104,27 @@ public class AnsiblePrepService { | |||||
} | } | ||||
// Copy database with new encryption key | // Copy database with new encryption key | ||||
if (installRoles.stream().anyMatch(r->r.getName().startsWith("bubble-"))) { | |||||
final String key = dbFilter.copyDatabase(fork, network, node, account, planApps, new File(bubbleFilesDir, "bubble.sql.gz")); | |||||
ctx.put("dbEncryptionKey", key); | |||||
// if this is a fork, and current server is local, then sage will be self | |||||
if (fork && configuration.getThisNode().localIp4()) { | |||||
ctx.put("sageNode", node.getUuid()); | |||||
} else { | |||||
// otherwise, sage will be us, the node that is launching the new node | |||||
ctx.put("sageNode", configuration.getThisNode().getUuid()); | |||||
} | |||||
} | |||||
final String key = dbFilter.copyDatabase(fork, network, node, account, planApps, new File(bubbleFilesDir, "bubble.sql.gz")); | |||||
ctx.put("dbEncryptionKey", key); | |||||
for (AnsibleRole role : roles) { | |||||
@Cleanup final InputStream roleStream = getTgzInputStream(node.getAccount(), role); | |||||
if (roleStream == null) { | |||||
errors.addViolation("err.role.notFound", "roleStream was null for: "+role.getTgzB64()); | |||||
continue; | |||||
} | |||||
final File roleTarball = toFile(new File(tarballDir, role.getName() + ".tgz"), roleStream); | |||||
final File rolesDir = new File(automation, "roles"); | |||||
Tarball.unroll(roleTarball, rolesDir); | |||||
// if this is a fork, and current server is local, then sage will be self | |||||
if (fork && configuration.getThisNode().localIp4()) { | |||||
ctx.put("sageNode", node.getUuid()); | |||||
} else { | |||||
// otherwise, sage will be us, the node that is launching the new node | |||||
ctx.put("sageNode", configuration.getThisNode().getUuid()); | |||||
} | |||||
if (role.hasConfig()) { | |||||
final String roleName = role.getRoleName(); | |||||
final File roleDir = new File(abs(rolesDir)+"/"+roleName); | |||||
final File rolesDir = new File(automation, "roles"); | |||||
for (String roleName : installRoles) { | |||||
final TempDir roleTemp = copyClasspathDirectory("ansible/roles/"+roleName); | |||||
final File roleDir = new File(rolesDir, roleName); | |||||
if (!roleTemp.renameTo(roleDir)) return die("prepAnsible: error renaming role dir "+abs(roleTemp)+" -> "+abs(roleDir)); | |||||
final File bubbleRoleJson = new File(abs(roleDir)+"/files/bubble_role.json"); | |||||
if (bubbleRoleJson.exists()) { | |||||
final File varsDir = mkdirOrDie(new File(abs(roleDir)+"/vars")); | final File varsDir = mkdirOrDie(new File(abs(roleDir)+"/vars")); | ||||
final File varsMain = new File(varsDir, "main.yml"); | final File varsMain = new File(varsDir, "main.yml"); | ||||
final AnsibleRole role = json(FileUtil.toStringOrDie(bubbleRoleJson), AnsibleRole.class); | |||||
try (Writer w = new FileWriter(varsMain)) { | try (Writer w = new FileWriter(varsMain)) { | ||||
for (NameAndValue cfg : role.getConfig()) { | for (NameAndValue cfg : role.getConfig()) { | ||||
final String cfgName = cfg.getName(); | final String cfgName = cfg.getName(); | ||||
@@ -158,19 +147,8 @@ public class AnsiblePrepService { | |||||
return ctx; | return ctx; | ||||
} | } | ||||
public InputStream getTgzInputStream(String account, AnsibleRole role) { | |||||
try { | |||||
final String tgzB64 = role.getTgzB64(); | |||||
if (role.isTgzB64storage()) { | |||||
return storageService.read(role.getAccount(), role.getTgzB64()); | |||||
} else { | |||||
log.debug("getTgzInputStream: reading directly from tgzB64"); | |||||
return new ByteArrayInputStream(Base64.decode(tgzB64, DONT_GUNZIP)); | |||||
} | |||||
} catch (Exception e) { | |||||
return die("getTgzInputStream: "+e, e); | |||||
} | |||||
public static void main (String[] args) { | |||||
final TempDir tempDir = copyClasspathDirectory("ansible/roles/common"); | |||||
log.info("tempDir="+abs(tempDir)); | |||||
} | } | ||||
} | } |
@@ -47,7 +47,6 @@ public class NodeProgressMeterConstants { | |||||
public static final String METER_ERROR_PEER_LIMIT_REACHED = "BUBBLE-ERROR: PEER LIMIT REACHED"; | public static final String METER_ERROR_PEER_LIMIT_REACHED = "BUBBLE-ERROR: PEER LIMIT REACHED"; | ||||
public static final String METER_ERROR_NODE_CLOUD_NOT_FOUND = "BUBBLE-ERROR: NODE CLOUD NOT FOUND"; | public static final String METER_ERROR_NODE_CLOUD_NOT_FOUND = "BUBBLE-ERROR: NODE CLOUD NOT FOUND"; | ||||
public static final String METER_ERROR_BUBBLE_JAR_NOT_FOUND = "BUBBLE-ERROR: BUBBLE JAR NOT FOUND"; | public static final String METER_ERROR_BUBBLE_JAR_NOT_FOUND = "BUBBLE-ERROR: BUBBLE JAR NOT FOUND"; | ||||
public static final String METER_ERROR_ROLES_NOT_FOUND = "BUBBLE-ERROR: ANSIBLE ROLES NOT FOUND"; | |||||
public static final String METER_ERROR_NO_IP_OR_SSH_KEY = "BUBBLE-ERROR: NODE STARTED BUT HAS NO IP ADDRESS OR SSH KEY"; | public static final String METER_ERROR_NO_IP_OR_SSH_KEY = "BUBBLE-ERROR: NODE STARTED BUT HAS NO IP ADDRESS OR SSH KEY"; | ||||
public static final String METER_ERROR_ROLE_VALIDATION_ERRORS = "BUBBLE-ERROR: ROLE VALIDATION FAILED"; | public static final String METER_ERROR_ROLE_VALIDATION_ERRORS = "BUBBLE-ERROR: ROLE VALIDATION FAILED"; | ||||
@@ -113,7 +113,6 @@ public class StandardNetworkService implements NetworkService { | |||||
@Autowired private BubbleDomainDAO domainDAO; | @Autowired private BubbleDomainDAO domainDAO; | ||||
@Autowired private CloudServiceDAO cloudDAO; | @Autowired private CloudServiceDAO cloudDAO; | ||||
@Autowired private BubbleConfiguration configuration; | @Autowired private BubbleConfiguration configuration; | ||||
@Autowired private AnsibleRoleDAO roleDAO; | |||||
@Autowired private AccountPlanDAO accountPlanDAO; | @Autowired private AccountPlanDAO accountPlanDAO; | ||||
@Autowired private AccountPolicyDAO policyDAO; | @Autowired private AccountPolicyDAO policyDAO; | ||||
@Autowired private AccountMessageDAO accountMessageDAO; | @Autowired private AccountMessageDAO accountMessageDAO; | ||||
@@ -224,15 +223,8 @@ public class StandardNetworkService implements NetworkService { | |||||
@Cleanup("delete") final TempDir automation = new TempDir(); | @Cleanup("delete") final TempDir automation = new TempDir(); | ||||
final File bubbleFilesDir = mkdirOrDie(new File(abs(automation) + "/roles/bubble/files")); | final File bubbleFilesDir = mkdirOrDie(new File(abs(automation) + "/roles/bubble/files")); | ||||
final List<AnsibleRole> roles = roleDAO.findByAccountAndNames(account, domain.getRoles()); | |||||
if (roles.size() != domain.getRoles().length) { | |||||
progressMeter.error(METER_ERROR_ROLES_NOT_FOUND); | |||||
return die("newNode: error finding ansible roles"); | |||||
} | |||||
// build automation directory for this run | // build automation directory for this run | ||||
final ValidationResult errors = new ValidationResult(); | final ValidationResult errors = new ValidationResult(); | ||||
final File roleTgzDir = mkdirOrDie(new File(abs(bubbleFilesDir), "role_tgz")); | |||||
progressMeter.write(METER_TICK_LAUNCHING_NODE); | progressMeter.write(METER_TICK_LAUNCHING_NODE); | ||||
node.setState(BubbleNodeState.starting); | node.setState(BubbleNodeState.starting); | ||||
@@ -258,7 +250,7 @@ public class StandardNetworkService implements NetworkService { | |||||
progressMeter.write(METER_TICK_PREPARING_ROLES); | progressMeter.write(METER_TICK_PREPARING_ROLES); | ||||
final Map<String, Object> ctx = ansiblePrep.prepAnsible( | final Map<String, Object> ctx = ansiblePrep.prepAnsible( | ||||
automation, bubbleFilesDir, account, network, node, computeDriver, | automation, bubbleFilesDir, account, network, node, computeDriver, | ||||
roles, errors, roleTgzDir, nn.fork(), nn.getRestoreKey()); | |||||
errors, nn.fork(), nn.getRestoreKey()); | |||||
if (errors.isInvalid()) { | if (errors.isInvalid()) { | ||||
progressMeter.error(METER_ERROR_ROLE_VALIDATION_ERRORS); | progressMeter.error(METER_ERROR_ROLE_VALIDATION_ERRORS); | ||||
throw new MultiViolationException(errors.getViolationBeans()); | throw new MultiViolationException(errors.getViolationBeans()); | ||||
@@ -0,0 +1,33 @@ | |||||
{ | |||||
"name": "bubble", | |||||
"config": [ | |||||
{"name": "node_uuid", "value": "[[node.uuid]]"}, | |||||
{"name": "network_uuid", "value": "[[node.network]]"}, | |||||
{"name": "admin_port", "value": "[[node.adminPort]]"}, | |||||
{"name": "ssl_port", "value": "[[node.sslPort]]"}, | |||||
{"name": "public_base_uri", "value": "[[publicBaseUri]]"}, | |||||
{"name": "sage_node", "value": "[[sageNode]]"}, | |||||
{"name": "install_type", "value": "[[installType]]"}, | |||||
{"name": "promo_code_policy", "value": "[[#compare fork '==' true]][[configuration.promoCodePolicy]][[else]]disabled[[/compare]]"}, | |||||
{"name": "default_locale", "value": "[[network.locale]]"}, | |||||
{"name": "time_zone", "value": "[[network.timezone]]"}, | |||||
{"name": "bubble_version", "value": "[[configuration.version]]"}, | |||||
{"name": "bubble_host", "value": "[[node.fqdn]]"}, | |||||
{"name": "bubble_cname", "value": "[[network.networkDomain]]"}, | |||||
{"name": "admin_user", "value": "[[node.ansibleUser]]"}, | |||||
{"name": "db_encoding", "value": "UTF-8"}, | |||||
{"name": "db_locale", "value": "en_US"}, | |||||
{"name": "db_user", "value": "bubble"}, | |||||
{"name": "db_name", "value": "bubble"}, | |||||
{"name": "db_key", "value": "[[dbEncryptionKey]]"}, | |||||
{"name": "letsencrypt_email", "value": "[[configuration.letsencryptEmail]]"}, | |||||
{"name": "is_fork", "value": "[[fork]]"}, | |||||
{"name": "restore_key", "value": "[[restoreKey]]"}, | |||||
{"name": "restore_timeout", "value": "[[restoreTimeoutSeconds]]"}, | |||||
{"name": "test_mode", "value": "[[testMode]]"}, | |||||
{"name": "error_url", "value": "[[error_url]]"}, | |||||
{"name": "error_key", "value": "[[error_key]]"}, | |||||
{"name": "error_env", "value": "[[error_env]]"} | |||||
], | |||||
"optionalConfigNames": ["restore_key", "restore_timeout", "error_url", "error_key", "error_env"] | |||||
} |
@@ -0,0 +1,64 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
- name: Set system timezone
  shell: timedatectl set-timezone {{ time_zone }}

# Management helpers rendered from Jinja2 templates; 0555 = read/execute for all
- name: Install helper template scripts
  template:
    src: "{{ item.src }}"
    dest: "/usr/local/bin/{{ item.dest }}"
    owner: root
    group: root
    mode: 0555
  with_items:
    - { src: "full_reset_db.sh.j2", dest: "full_reset_db.sh" }
    - { src: "snapshot_ansible.sh.j2", dest: "snapshot_ansible.sh" }

# Generate per-node secret files under /home/bubble; the group controls who may read each
- name: Generate keys
  shell: random_password.sh /home/bubble/{{ item.file }} bubble {{ item.group }}
  with_items:
    - { file: '.BUBBLE_REDIS_ENCRYPTION_KEY', group: root }
    - { file: '.BUBBLE_DB_ENCRYPTION_KEY', group: postgres } # postgres user needs access to DB key
    - { file: '.BUBBLE_PG_PASSWORD', group: postgres } # postgres user needs access to DB password

# Overwrite the randomly generated DB key with the db_key supplied in the role config
- name: Write DB key
  shell: echo -n "{{ db_key }}" > /home/bubble/.BUBBLE_DB_ENCRYPTION_KEY

- name: Write bubble env file
  template:
    src: bubble.env.j2
    dest: /home/bubble/bubble_{{ bubble_version }}/bubble.env
    owner: bubble
    group: bubble
    mode: 0400

# Node identity/connection files; 0600 because they contain key material
- name: Install bubble self_node.json, sage_node.json and sage_key.json
  copy:
    src: "{{ item }}"
    dest: /home/bubble/{{ item }}
    owner: bubble
    group: bubble
    mode: 0600
  with_items:
    - self_node.json
    - sage_node.json
    - sage_key.json

- name: Initialize local storage with role archive
  shell: init_roles.sh

- import_tasks: postgresql_data.yml

- name: Install refresh_bubble_ssh_keys script
  template:
    src: refresh_bubble_ssh_keys.sh.j2
    dest: /usr/local/sbin/refresh_bubble_ssh_keys.sh
    owner: root
    group: root
    mode: 0500

# NOTE(review): presumably this supervisor program watches for the
# /home/bubble/.refresh_ssh_keys marker (touched by the finalizer role) and
# re-runs the script above -- confirm against the conf file contents
- name: Install refresh_bubble_ssh_keys_monitor supervisor conf file
  copy:
    src: supervisor_refresh_bubble_ssh_keys_monitor.conf
    dest: /etc/supervisor/conf.d/refresh_bubble_ssh_keys_monitor.conf
@@ -0,0 +1,15 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
# Schema is readable by group postgres only (0440); bubble owns the file
- name: Install SQL schema files
  copy:
    src: "{{ item }}"
    dest: /home/bubble/sql/{{ item }}
    owner: bubble
    group: postgres
    mode: 0440
  with_items:
    - "bubble.sql.gz"

# full_reset_db.sh requires the postgres user; 'cd &&' runs it from postgres' home
- name: Populate database
  shell: sudo -H -u postgres bash -c "cd && full_reset_db.sh"
@@ -0,0 +1,17 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
# Only applies when this node is being created from a backup (restore_key defined)
- name: Install restore helper scripts
  copy:
    src: '{{ item }}'
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: postgres
    mode: 0550
  with_items:
    - "bubble_restore_monitor.sh"
  when: restore_key is defined

# Background the monitor: it waits (up to restore_timeout seconds) for restore
# data to arrive, then applies it and restarts services
- name: Start restore monitor
  shell: bash -c 'nohup /usr/local/bin/bubble_restore_monitor.sh {{ admin_port }} {{ restore_timeout }} > /dev/null &'
  when: restore_key is defined
@@ -0,0 +1,12 @@ | |||||
# Environment for the Bubble API server (passed to bubble.server.BubbleServer
# by the supervisor conf). Lines starting with '#' are ignored by readers of
# this file. Values in {{ }} are filled in from the bubble role config.
export PUBLIC_BASE_URI={{ public_base_uri }}
export BUBBLE_ASSETS_DIR=/home/bubble/site
export SELF_NODE={{ node_uuid }}
export SAGE_NODE={{ sage_node }}
export BUBBLE_PROMO_CODE_POLICY={{ promo_code_policy }}
export LETSENCRYPT_EMAIL={{ letsencrypt_email }}
export BUBBLE_SERVER_PORT={{ admin_port }}
export BUBBLE_TEST_MODE={{ test_mode }}
export BUBBLE_DEFAULT_LOCALE={{ default_locale }}
# Errbit error-reporting settings are optional; default to empty when unset
export ERRBIT_URL={{ error_url | default('') }}
export ERRBIT_KEY={{ error_key | default('') }}
export ERRBIT_ENV={{ error_env | default('') }}
@@ -0,0 +1,19 @@ | |||||
#!/bin/bash
#
# Re-initializes the bubble database via init_bubble_db.sh. Must run as the
# postgres user; when started as root it re-executes itself via su.
# Rendered from a Jinja2 template: db_name/db_user/is_fork/install_type are
# substituted at install time. The first script argument is passed through
# to init_bubble_db.sh.
#
function die {
  echo 1>&2 "${1}"
  exit 1
}

# Re-exec as postgres when invoked as root.
# Quote "${0}" and "${@}" so paths/args containing spaces survive word-splitting.
if [[ $(whoami) == "root" ]] ; then
  su - postgres "${0}" "${@}"
  exit $?
fi

if [[ $(whoami) != "postgres" ]] ; then
  die "${0} : must be run as postgres user"
fi

# Run from the SQL directory installed by postgresql_data.yml; "${1}" is quoted
# so an empty/absent first argument is passed consistently
cd ~bubble/sql \
  && init_bubble_db.sh {{ db_name }} {{ db_user }} {{ is_fork }} {{ install_type }} "${1}" \
  || die "error reinitializing database"
@@ -0,0 +1,65 @@ | |||||
#!/bin/bash
#
# Rebuilds /root/.ssh/authorized_keys from the admin accounts' installable SSH
# keys in the bubble database, always retaining the ansible setup key.
#
LOG=/tmp/bubble.refresh_bubble_ssh_keys.log

function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

function log {
  echo "$(date): ${1}" >> ${LOG}
}

# Select keys flagged for installation on admin accounts.
# expiration is stored in epoch milliseconds; keep keys with no expiration or
# an expiration in the future. BUGFIX: the previous comparison used '<', which
# selected only keys that had ALREADY expired and dropped all current ones.
CURRENT_KEYS_SQL='
SELECT k.ssh_public_key
FROM account_ssh_key k, account a
WHERE a.uuid = k.account
 AND a.admin = true
 AND k.install_ssh_key = true
 AND (k.expiration is null or k.expiration > 1000*extract(epoch from now()))'

AUTH_KEYS="/root/.ssh/authorized_keys"

# Build the replacement file next to the real one so the final mv is atomic
NEW_KEYS=$(mktemp /root/.ssh/authorized_keys.XXXXXXX)
chmod 600 ${NEW_KEYS} || die "Error setting permissions on new authorized_keys file: ${NEW_KEYS}"

KEY_COUNT=0
for key in $(echo "${CURRENT_KEYS_SQL}" | PGPASSWORD="$(cat /home/bubble/.BUBBLE_PG_PASSWORD)" psql -U bubble -h 127.0.0.1 bubble -qt) ; do
  # skip blank rows; '[[:space:]]' is quoted so the shell cannot glob-expand it
  if [[ -z "$(echo "${key}" | tr -d '[[:space:]]')" ]] ; then
    continue
  fi
  # stored keys are encrypted; bdecrypt yields the plaintext public key
  KEY="$(bdecrypt "${key}" 2> /dev/null)"
  if [[ ! -z "${KEY}" && "${KEY}" == ssh-rsa* ]] ; then
    log "Adding authorized key: $(echo "${KEY}" | tr -d '\n')"
    echo "${KEY}" >> ${NEW_KEYS}
    KEY_COUNT=$(expr ${KEY_COUNT} + 1)
  else
    log "Warning: NOT adding malformed key: $(echo "${KEY}" | tr -d '\n')"
  fi
done

if [[ ${KEY_COUNT} -eq 0 ]] ; then
  # Sanity check that we can even talk to psql
  # We may be out of memory, in which case we do not want to erase existing installed keys
  if [[ -z "$(echo 'SELECT count(*) FROM account_ssh_key' | PGPASSWORD="$(cat /home/bubble/.BUBBLE_PG_PASSWORD)" psql -U bubble -h 127.0.0.1 bubble)" ]] ; then
    log "Warning: error calling psql, not installing/uninstalling any keys"
    exit 0
  fi
fi

# Retain self-generated ansible setup key
ANSIBLE_USER="{{admin_user}}"
if [[ ! -z "${ANSIBLE_USER}" ]] ; then
  PUB_FILE="$(cd ~{{admin_user}} && pwd)/.ssh/bubble_rsa.pub"
  if [[ -f "${PUB_FILE}" ]] ; then
    cat "${PUB_FILE}" >> ${NEW_KEYS}
  fi
else
  log "Warning: No ansible user defined, unable to retain setup key"
fi

mv ${NEW_KEYS} ${AUTH_KEYS} || die "Error moving ${NEW_KEYS} -> ${AUTH_KEYS}"
log "Installed ${KEY_COUNT} authorized SSH keys: ${NEW_KEYS} -> ${AUTH_KEYS}"
@@ -0,0 +1,34 @@ | |||||
#!/bin/bash
# Snapshots the ansible user's ~/ansible directory to /home/bubble/ansible.tgz,
# readable only by the bubble user. Must run as the templated admin_user; when
# started as root it re-executes itself via sudo.
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
LOG=/tmp/$(basename ${0}).log
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
function log {
  echo "${1}" | tee -a ${LOG}
}
# Re-exec as the admin (ansible) user when running as root; refuse any other user
if [[ $(whoami) != "{{ admin_user }}" ]] ; then
  if [[ $(whoami) == "root" ]] ; then
    sudo -H -u "{{ admin_user }}" ${0}
    exit $?
  fi
  die "${0} must be run as {{ admin_user }}"
fi
ANSIBLE_USER_HOME=$(cd ~{{ admin_user }} && pwd)
ANSIBLE_SNAPSHOT="/home/bubble/ansible.tgz"
# tar from the home dir so the archive contains a relative ./ansible path;
# 0400 + chown bubble so only the bubble user can read the snapshot
cd ${ANSIBLE_USER_HOME} \
  && tar czf ${ANSIBLE_SNAPSHOT} ./ansible \
  && chmod 400 ${ANSIBLE_SNAPSHOT} \
  && chown bubble ${ANSIBLE_SNAPSHOT} \
  || die "Error creating ansible snapshot"
@@ -0,0 +1,10 @@ | |||||
{ | |||||
"name": "bubble_finalizer", | |||||
"config": [ | |||||
{"name": "server_alias", "value": "[[network.networkDomain]]"}, | |||||
{"name": "restore_key", "value": "[[restoreKey]]"}, | |||||
{"name": "install_type", "value": "[[installType]]"}, | |||||
{"name": "bubble_java_opts", "value": "-XX:MaxRAM=[[expr nodeSize.memoryMB '//' '2.625']]m"} | |||||
], | |||||
"optionalConfigNames": ["restore_key"] | |||||
} |
@@ -0,0 +1,38 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
- name: Snapshot ansible roles | |||||
shell: snapshot_ansible.sh | |||||
- name: Touch first-time setup file | |||||
shell: su - bubble bash -c "if [[ ! -f /home/bubble/first_time_marker ]] ; then echo -n install > /home/bubble/first_time_marker ; fi" | |||||
when: restore_key is not defined | |||||
- name: Touch first-time setup file (restore) | |||||
shell: su - bubble bash -c "if [[ ! -f /home/bubble/first_time_marker ]] ; then echo -n restore > /home/bubble/first_time_marker ; fi" | |||||
when: restore_key is defined | |||||
- name: Install mitmproxy CA cert in local CA store | |||||
shell: install_cert.sh /home/mitmproxy/.mitmproxy/{{ server_alias }}-ca-cert.pem 600 | |||||
when: install_type == 'node' | |||||
- name: Install mitmproxy public certs in bubble dir | |||||
shell: /usr/local/bin/copy_certs_to_bubble.sh {{ server_alias }} | |||||
when: install_type == 'node' | |||||
- name: Install bubble supervisor conf file | |||||
template: | |||||
src: "supervisor_bubble.conf.j2" | |||||
dest: /etc/supervisor/conf.d/bubble.conf | |||||
# We cannot receive notifications until nginx is running, so start bubble API as the very last step | |||||
- name: Ensure bubble and bubble_nodemanager are started | |||||
supervisorctl: | |||||
name: '{{ item }}' | |||||
state: restarted | |||||
with_items: | |||||
- bubble | |||||
- nodemanager | |||||
- name: Ensure authorized SSH keys are up-to-date | |||||
shell: su - bubble bash -c "touch /home/bubble/.refresh_ssh_keys" |
@@ -0,0 +1,10 @@ | |||||
[program:bubble] | |||||
stdout_logfile = /home/bubble/logs/bubble-out.log | |||||
stderr_logfile = /home/bubble/logs/bubble-err.log | |||||
command=sudo -u bubble bash -c "/usr/bin/java \ | |||||
-Dfile.encoding=UTF-8 -Djava.net.preferIPv4Stack=true \ | |||||
-XX:+UseG1GC -XX:MaxGCPauseMillis=400 {{ bubble_java_opts }} \ | |||||
-cp /home/bubble/current/bubble.jar \ | |||||
bubble.server.BubbleServer \ | |||||
/home/bubble/current/bubble.env" |
@@ -0,0 +1,6 @@ | |||||
{ | |||||
"name": "common", | |||||
"config": [ | |||||
{"name": "hostname", "value": "[[node.fqdn]]"} | |||||
] | |||||
} |
@@ -0,0 +1,6 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
# hostname is supplied by the common role config (mapped from node.fqdn)
- name: Set hostname to {{ hostname }}
  hostname:
    name: '{{ hostname }}'
@@ -0,0 +1,11 @@ | |||||
{ | |||||
"name": "nginx", | |||||
"config": [ | |||||
{"name": "server_name", "value": "[[node.fqdn]]"}, | |||||
{"name": "server_alias", "value": "[[network.networkDomain]]"}, | |||||
{"name": "letsencrypt_email", "value": "[[configuration.letsencryptEmail]]"}, | |||||
{"name": "ssl_port", "value": "[[node.sslPort]]"}, | |||||
{"name": "admin_port", "value": "[[node.adminPort]]"}, | |||||
{"name": "install_type", "value": "[[installType]]"} | |||||
] | |||||
} |
@@ -0,0 +1,22 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
# Obtain certificates, harden DH parameters, install the site configs, then restart nginx
- name: Init certbot
  shell: init_certbot.sh {{ letsencrypt_email }} {{ server_name }} {{ server_alias }}

# see https://weakdh.org/sysadmin.html
- name: Create a strong dhparam.pem
  shell: openssl dhparam -out /etc/nginx/dhparams.pem 2048
  args:
    creates: /etc/nginx/dhparams.pem

- name: Create dhparam nginx conf
  template: src=stronger_dhparams.conf dest=/etc/nginx/conf.d/stronger_dhparams.conf

# 'include' for task files is deprecated; import_tasks is the static equivalent
# and matches the style used elsewhere in these roles
- import_tasks: site.yml

- meta: flush_handlers # nginx has to be restarted right now if it has to

- name: Ensure nginx is restarted
  service:
    name: nginx
    state: restarted
@@ -0,0 +1,40 @@ | |||||
# | |||||
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/ | |||||
# | |||||
- name: Disable default site
  file:
    path: /etc/nginx/sites-enabled/default
    state: absent

# Used by certbot http-01 challenges (served by the acme-challenge location in the site templates)
- name: Create the .well-known directory
  file:
    path: /var/www/html/.well-known
    owner: www-data
    group: www-data
    state: directory

# install_type selects the template pair: site_node*.conf.j2 or site_sage*.conf.j2
- name: Create default nginx site (type={{ install_type }})
  template:
    src: "site_{{ install_type }}.conf.j2"
    dest: "/etc/nginx/sites-available/{{ server_name }}.conf"

- name: Create alias nginx site (type={{ install_type }})
  template:
    src: "site_{{ install_type }}_alias.conf.j2"
    dest: "/etc/nginx/sites-available/{{ server_alias }}.conf"

- name: Symlink default site to site-enabled
  file:
    src: /etc/nginx/sites-available/{{ server_name }}.conf
    dest: /etc/nginx/sites-enabled/{{ server_name }}.conf
    owner: root
    group: root
    state: link

- name: Symlink alias site to site-enabled
  file:
    src: /etc/nginx/sites-available/{{ server_alias }}.conf
    dest: /etc/nginx/sites-enabled/{{ server_alias }}.conf
    owner: root
    group: root
    state: link
@@ -0,0 +1,54 @@ | |||||
# Bubble node site (FQDN): serves the SPA from /home/bubble/site and proxies
# /api to the local Bubble API and /nodeman to the node manager.
server {
    server_name {{ server_name }};
    # plain-HTTP listeners (redirected to https by the scheme check below)
    listen 80;
    listen 1080;
    # TLS on both the network-specific ssl_port and standard 443
    listen {{ ssl_port }} ssl http2;
    listen 443 ssl http2;
    client_max_body_size 200M;
    root /home/bubble/site/;
    index index.html;
    # SPA routing: collapse leading path segments to the basename, else index.html
    location / {
        rewrite ^/(\w+/)+(?<basename>.*)$ /$basename break;
        try_files $uri /index.html =404;
    }
    location /api {
        proxy_pass http://127.0.0.1:{{ admin_port }}/api;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    location /nodeman {
        return 302 /nodeman/;
    }
    # node manager UI on local port 7800
    location /nodeman/ {
        proxy_pass http://127.0.0.1:7800/;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    # certbot http-01 challenge files (written under /var/www/html by site.yml)
    location ^~ /.well-known/acme-challenge/ {
        default_type "text/plain";
        root /var/www/html;
    }
    ssl_certificate /etc/letsencrypt/live/{{ server_name }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ server_name }}/privkey.pem;
    ssl_session_cache shared:le_nginx_SSL:1m;
    ssl_session_timeout 1440m;
    # NOTE(review): TLSv1/TLSv1.1 are deprecated -- consider TLSv1.2+ only
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES128-SHA ECDHE-ECDSA-AES256-SHA ECDHE-ECDSA-AES128-SHA256 ECDHE-ECDSA-AES256-SHA384 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-RSA-AES128-SHA ECDHE-RSA-AES128-SHA256 ECDHE-RSA-AES256-SHA384 DHE-RSA-AES128-GCM-SHA256 DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES128-SHA DHE-RSA-AES256-SHA DHE-RSA-AES128-SHA256 DHE-RSA-AES256-SHA256 EDH-RSA-DES-CBC3-SHA";
    # redirect any plain-HTTP request to the TLS port
    if ($scheme != "https") {
        return 301 https://$host:{{ ssl_port }}$request_uri;
    }
}
@@ -0,0 +1,54 @@ | |||||
# Bubble node site (network-domain alias): same behavior as the FQDN site,
# but certificates and server_name use server_alias.
server {
    server_name {{ server_alias }};
    listen 80;
    listen 1080;
    listen {{ ssl_port }} ssl http2;
    listen 443 ssl http2;
    client_max_body_size 200M;
    root /home/bubble/site/;
    index index.html;
    # SPA routing: collapse leading path segments to the basename, else index.html
    location / {
        rewrite ^/(\w+/)+(?<basename>.*)$ /$basename break;
        try_files $uri /index.html =404;
    }
    location /api {
        proxy_pass http://127.0.0.1:{{ admin_port }}/api;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        # NOTE(review): forwards server_name (the FQDN) rather than server_alias --
        # possibly intentional canonicalization, but looks copy-pasted; confirm
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    location /nodeman {
        return 302 /nodeman/;
    }
    location /nodeman/ {
        proxy_pass http://127.0.0.1:7800/;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    # certbot http-01 challenge files
    location ^~ /.well-known/acme-challenge/ {
        default_type "text/plain";
        root /var/www/html;
    }
    ssl_certificate /etc/letsencrypt/live/{{ server_alias }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ server_alias }}/privkey.pem;
    ssl_session_cache shared:le_nginx_SSL:1m;
    ssl_session_timeout 1440m;
    # NOTE(review): TLSv1/TLSv1.1 are deprecated -- consider TLSv1.2+ only
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES128-SHA ECDHE-ECDSA-AES256-SHA ECDHE-ECDSA-AES128-SHA256 ECDHE-ECDSA-AES256-SHA384 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-RSA-AES128-SHA ECDHE-RSA-AES128-SHA256 ECDHE-RSA-AES256-SHA384 DHE-RSA-AES128-GCM-SHA256 DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES128-SHA DHE-RSA-AES256-SHA DHE-RSA-AES128-SHA256 DHE-RSA-AES256-SHA256 EDH-RSA-DES-CBC3-SHA";
    # redirect any plain-HTTP request to the TLS port
    if ($scheme != "https") {
        return 301 https://$host:{{ ssl_port }}$request_uri;
    }
}
@@ -0,0 +1,52 @@ | |||||
# Bubble sage site (FQDN): same layout as the node site but without the extra
# 1080/ssl_port listeners -- only standard 80/443.
server {
    listen 80;
    server_name {{ server_name }};
    client_max_body_size 200M;
    root /home/bubble/site/;
    index index.html;
    # SPA routing: collapse leading path segments to the basename, else index.html
    location / {
        rewrite ^/(\w+/)+(?<basename>.*)$ /$basename break;
        try_files $uri /index.html =404;
    }
    location /api {
        proxy_pass http://127.0.0.1:{{ admin_port }}/api;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    location /nodeman {
        return 302 /nodeman/;
    }
    location /nodeman/ {
        proxy_pass http://127.0.0.1:7800/;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    # certbot http-01 challenge files
    location ^~ /.well-known/acme-challenge/ {
        default_type "text/plain";
        root /var/www/html;
    }
    listen 443 ssl;
    ssl_certificate /etc/letsencrypt/live/{{ server_name }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ server_name }}/privkey.pem;
    ssl_session_cache shared:le_nginx_SSL:1m;
    ssl_session_timeout 1440m;
    # NOTE(review): TLSv1/TLSv1.1 are deprecated -- consider TLSv1.2+ only
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES128-SHA ECDHE-ECDSA-AES256-SHA ECDHE-ECDSA-AES128-SHA256 ECDHE-ECDSA-AES256-SHA384 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-RSA-AES128-SHA ECDHE-RSA-AES128-SHA256 ECDHE-RSA-AES256-SHA384 DHE-RSA-AES128-GCM-SHA256 DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES128-SHA DHE-RSA-AES256-SHA DHE-RSA-AES128-SHA256 DHE-RSA-AES256-SHA256 EDH-RSA-DES-CBC3-SHA";
    # redirect any plain-HTTP request to https (default port, unlike the node site)
    if ($scheme != "https") {
        return 301 https://$host$request_uri;
    }
}
@@ -0,0 +1,52 @@ | |||||
# Bubble sage site (network-domain alias): identical to the sage FQDN site,
# but certificates and server_name use server_alias.
server {
    listen 80;
    server_name {{ server_alias }};
    client_max_body_size 200M;
    root /home/bubble/site/;
    index index.html;
    # SPA routing: collapse leading path segments to the basename, else index.html
    location / {
        rewrite ^/(\w+/)+(?<basename>.*)$ /$basename break;
        try_files $uri /index.html =404;
    }
    location /api {
        proxy_pass http://127.0.0.1:{{ admin_port }}/api;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        # NOTE(review): forwards server_name (the FQDN) rather than server_alias --
        # possibly intentional canonicalization, but looks copy-pasted; confirm
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    location /nodeman {
        return 302 /nodeman/;
    }
    location /nodeman/ {
        proxy_pass http://127.0.0.1:7800/;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-Host {{ server_name }};
        proxy_set_header X-Forwarded-Proto https;
    }
    # certbot http-01 challenge files
    location ^~ /.well-known/acme-challenge/ {
        default_type "text/plain";
        root /var/www/html;
    }
    listen 443 ssl;
    ssl_certificate /etc/letsencrypt/live/{{ server_alias }}/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/{{ server_alias }}/privkey.pem;
    ssl_session_cache shared:le_nginx_SSL:1m;
    ssl_session_timeout 1440m;
    # NOTE(review): TLSv1/TLSv1.1 are deprecated -- consider TLSv1.2+ only
    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256 ECDHE-ECDSA-AES256-GCM-SHA384 ECDHE-ECDSA-AES128-SHA ECDHE-ECDSA-AES256-SHA ECDHE-ECDSA-AES128-SHA256 ECDHE-ECDSA-AES256-SHA384 ECDHE-RSA-AES128-GCM-SHA256 ECDHE-RSA-AES256-GCM-SHA384 ECDHE-RSA-AES128-SHA ECDHE-RSA-AES128-SHA256 ECDHE-RSA-AES256-SHA384 DHE-RSA-AES128-GCM-SHA256 DHE-RSA-AES256-GCM-SHA384 DHE-RSA-AES128-SHA DHE-RSA-AES256-SHA DHE-RSA-AES128-SHA256 DHE-RSA-AES256-SHA256 EDH-RSA-DES-CBC3-SHA";
    # redirect any plain-HTTP request to https (default port)
    if ($scheme != "https") {
        return 301 https://$host$request_uri;
    }
}
@@ -0,0 +1 @@ | |||||
ssl_dhparam /etc/nginx/dhparams.pem; |
@@ -256,6 +256,19 @@ | |||||
{"name": "medium", "type": "medium", "internalName": "s-1vcpu-2gb", "vcpu": 1, "memoryMB": 2048, "ssdGB": 50}, | {"name": "medium", "type": "medium", "internalName": "s-1vcpu-2gb", "vcpu": 1, "memoryMB": 2048, "ssdGB": 50}, | ||||
{"name": "large", "type": "large", "internalName": "s-2vcpu-4gb", "vcpu": 2, "memoryMB": 4096, "ssdGB": 80} | {"name": "large", "type": "large", "internalName": "s-2vcpu-4gb", "vcpu": 2, "memoryMB": 4096, "ssdGB": 80} | ||||
], | ], | ||||
"packer": { | |||||
"vars": [{"name": "DIGITALOCEAN_API_KEY", "value": "credentials.apiKey"}], | |||||
"builder": { | |||||
"type": "digitalocean", | |||||
"ssh_username": "root", | |||||
"api_token": "<<user `DIGITALOCEAN_API_KEY`>>", | |||||
"image": "ubuntu-18-04-x64", | |||||
"region": "[[region]]", | |||||
"size": "s-1vcpu-1gb", | |||||
"ipv6": true, | |||||
"tags": ["packer-bubble"] | |||||
} | |||||
}, | |||||
"config": [{"name": "os", "value": "ubuntu-18-04-x64"}] | "config": [{"name": "os", "value": "ubuntu-18-04-x64"}] | ||||
}, | }, | ||||
"credentials": { | "credentials": { | ||||
@@ -0,0 +1,2 @@ | |||||
[bubble] | |||||
127.0.0.1 ansible_python_interpreter=/usr/bin/python3 |
@@ -0,0 +1,25 @@ | |||||
---
# Packer provisioning playbook: configures a fresh base image as a Bubble "sage" node.
- name: Create new bubble sage node
  hosts: bubble
  remote_user: root
  gather_facts: no
  vars:
    install_type: sage
  pre_tasks:
    # raw tasks: python3 must exist before regular Ansible modules can run
    - name: apt install python3 and python3-pip
      raw: sudo apt-get -y install python3 python3-pip virtualenv
    - name: pip install setuptools and psycopg2-binary
      raw: sudo pip3 install setuptools psycopg2-binary
    # facts were skipped above (gather_facts: no); collect them now that python3 exists
    - name: gather facts
      setup:
  roles:
    - common
    - firewall
    - nginx
    - bubble
    - bubble_finalizer
@@ -0,0 +1,34 @@ | |||||
{ | |||||
"variables": { | |||||
[[#each packer.vars]]"[[name]]": "{{env `[[name]]`}}"[[#unless @last]], | |||||
[[/unless]][[/each]] | |||||
}, | |||||
"builders": [ | |||||
[[#each packer.builders]][[json this]][[#unless @last]], | |||||
[[/unless]][[/each]] | |||||
], | |||||
"provisioners": [ | |||||
{ | |||||
"type": "shell", | |||||
"inline": [ | |||||
"sleep 30", | |||||
"sudo apt-get -y update", | |||||
"sudo apt-get -y upgrade", | |||||
"sudo apt-get -y install python3 python3-pip virtualenv", | |||||
"sudo pip3 install setuptools psycopg2-binary ansible" | |||||
] | |||||
}, | |||||
{ | |||||
"type": "ansible-local", | |||||
"playbook_file": "packer-sage-playbook.yml", | |||||
"role_paths": ["."], | |||||
"inventory_file": "hosts" | |||||
} | |||||
], | |||||
"post-processors": [ | |||||
{ | |||||
"type": "manifest", | |||||
"output": "manifest.json" | |||||
} | |||||
] | |||||
} |
@@ -0,0 +1,5 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Convenience wrapper: open psql against the local bubble database using the
# generated password file; any extra arguments are passed through to psql.
PGPASSWORD="$(cat /home/bubble/.BUBBLE_PG_PASSWORD)" psql -U bubble -h 127.0.0.1 bubble "${@}"
@@ -0,0 +1,142 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Restore this bubble node from a backup unpacked under ${BUBBLE_HOME}/restore.
# Waits (up to TIMEOUT seconds) for the restore marker file to appear, then
# restores the jar, dotfiles, mitm certs, database, LocalStorage and algo VPN
# configs, and restarts the bubble and mitm services.
#
# Usage: <script> admin-port [timeout-seconds]
#   admin-port      - local bubble API port, used for the final health check
#   timeout-seconds - how long to wait for the restore marker (default 3600)
#
# Progress and errors are appended to ${LOG}.
#
BUBBLE_HOME="/home/bubble"
RESTORE_MARKER="${BUBBLE_HOME}/.restore"          # created externally to signal "backup data is in place, start restoring"
RESTORE_RUN_MARKER="${BUBBLE_HOME}/.restore_run"  # created by this script so a restore is only ever attempted once
SELF_NODE="self_node.json"
BUBBLE_SELF_NODE="${BUBBLE_HOME}/${SELF_NODE}"
ADMIN_PORT=${1:?no admin port provided}
TIMEOUT=${2:-3600} # 60 minutes default timeout
LOG=/tmp/bubble.restore.log
# die: echo to stderr, append to log, exit nonzero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
# log: append a line to the restore log
function log {
  echo "${1}" >> ${LOG}
}
# Poll every 5 seconds until the restore marker appears or TIMEOUT elapses
START=$(date +%s)
while [[ ! -f "${RESTORE_MARKER}" ]] ; do
  sleep 5
  if [[ $(expr $(date +%s) - ${START}) -gt ${TIMEOUT} ]] ; then
    break
  fi
done
if [[ ! -f "${RESTORE_MARKER}" ]] ; then
  die "Restore marker was never created: ${RESTORE_MARKER}"
fi
# was a restore already attempted? only one attempt is allowed. start another restore (with a new node) if you need to try again
if [[ -f ${RESTORE_RUN_MARKER} ]] ; then
  die "Restore was already attempted, cannot attempt again"
fi
touch ${RESTORE_RUN_MARKER}
# Ensure there is only one self_node.json in the backup. Otherwise maybe we have more than one backup; can't restore.
SELF_NODE_COUNT=$(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}" | wc -l | tr -d ' ')
if [[ ${SELF_NODE_COUNT} -eq 0 ]] ; then
  die "Cannot restore, restore base could not be determined (no ${SELF_NODE} found under ${BUBBLE_HOME}/restore)"
elif [[ ${SELF_NODE_COUNT} -gt 1 ]] ; then
  die "Cannot restore, restore base could not be determined (multiple ${SELF_NODE} files found under ${BUBBLE_HOME}/restore): $(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}")"
fi
# set RESTORE_BASE (directory containing the backup's self_node.json), ensure it is set
RESTORE_BASE=$(dirname $(find ${BUBBLE_HOME}/restore -type f -name "${SELF_NODE}" | head -1))
if [[ -z "${RESTORE_BASE}" ]] ; then
  die "Cannot restore, restore base could not be determined (no ${SELF_NODE} found under ${BUBBLE_HOME}/restore)"
fi
# stop bubble service
log "Stopping bubble service"
supervisorctl stop bubble
# stop mitmdump service
log "Stopping mitmproxy service"
supervisorctl stop mitmdump
# restore bubble.jar
log "Restoring bubble.jar"
cp ${RESTORE_BASE}/bubble.jar ${BUBBLE_HOME}/current/bubble.jar
# set wasRestored flag in self_node.json (rewrite via a temp file since jq cannot edit in place)
log "Adding wasRestored=true to ${SELF_NODE}"
TEMP_SELF=$(mktemp /tmp/self_node.XXXXXXX.json)
cat ${BUBBLE_SELF_NODE} | jq '.wasRestored = true' > ${TEMP_SELF} || die "Error adding 'wasRestored' flag to ${SELF_NODE}"
cat ${TEMP_SELF} > ${BUBBLE_SELF_NODE} || die "Error rewriting ${SELF_NODE}"
log "Setting ownership of json files to bubble user"
chown bubble ${BUBBLE_HOME}/*.json || die "Error changing ownership of json files to bubble user"
# restore dot files (credentials/keys stored as .BUBBLE_* files)
log "Restoring bubble dotfiles"
cp ${RESTORE_BASE}/dotfiles/.BUBBLE_* ${BUBBLE_HOME}/ || die "Error restoring dotfiles"
# restore mitm configs
log "Restoring mitm certs"
cp -R ${RESTORE_BASE}/mitm_certs ${BUBBLE_HOME}/ || die "Error restoring mitm certs"
# drop and recreate database from backup (but preserve bubble_node and bubble_node_key for current node)
log "Restoring bubble database"
cp ${RESTORE_BASE}/bubble.sql.gz ${BUBBLE_HOME}/sql/ \
  && chown -R bubble ${BUBBLE_HOME}/sql \
  && chgrp -R postgres ${BUBBLE_HOME}/sql \
  && chmod 550 ${BUBBLE_HOME}/sql \
  && chmod 440 ${BUBBLE_HOME}/sql/* || die "Error restoring bubble database archive"
su - postgres bash -c "cd ${BUBBLE_HOME}/sql && full_reset_db.sh drop" || die "Error restoring database"
# Remove old keys
log "Removing node keys"
echo "DELETE FROM bubble_node_key" | bsql.sh
# restore local storage; the "die" fires if either the rm or the rsync fails
log "Restoring bubble LocalStorage"
rm -rf ${BUBBLE_HOME}/.bubble_local_storage/* && rsync -ac ${RESTORE_BASE}/LocalStorage/* ${BUBBLE_HOME}/.bubble_local_storage/ || die "Error restoring LocalStorage"
# flush redis
log "Flushing redis"
echo "FLUSHALL" | redis-cli || die "Error flushing redis"
# restore algo configs; skip algo install entirely if the configs backup is absent
CONFIGS_BACKUP=/home/bubble/.BUBBLE_ALGO_CONFIGS.tgz
if [[ ! -f ${CONFIGS_BACKUP} ]] ; then
  log "Warning: Algo VPN configs backup not found: ${CONFIGS_BACKUP}, not installing algo"
else
  ALGO_BASE=/root/ansible/roles/algo/algo
  if [[ ! -d ${ALGO_BASE} ]] ; then
    die "Error restoring Algo VPN: directory ${ALGO_BASE} not found"
  fi
  cd ${ALGO_BASE} && tar xzf ${CONFIGS_BACKUP} || die "Error restoring algo VPN configs"
  # install/configure algo
  ${ALGO_BASE}/install_algo.sh || die "Error configuring or installing algo VPN"
  # ensure user monitor is running
  supervisorctl restart algo_refresh_users_monitor
fi
# restart mitm proxy service
log "Restarting mitmproxy"
supervisorctl restart mitmdump
# restart bubble service
log "Restore complete: restarting bubble API"
supervisorctl restart bubble
# verify service is running OK
log "Pausing for a bit, then verifying bubble server has successfully restarted after restore"
sleep 60
# NOTE(review): a failed health check is only logged; the markers below are removed regardless
curl https://$(hostname):${ADMIN_PORT}/api/.bubble || log "Error restarting bubble server"
# remove restore markers, we are done
log "Cleaning up temp files"
rm -f ${RESTORE_MARKER} ${RESTORE_RUN_MARKER}
@@ -0,0 +1,142 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Create (or fully reset) the bubble database and database user, then import
# the seed SQL and re-key its encrypted columns with a fresh key.
# Must be run as the "postgres" user.
#
# Usage: full_reset_db.sh db-name db-user is-fork install-mode [drop]
#   db-name      - database to create/populate
#   db-user      - database user that owns the database
#   is-fork      - "true" grants the user CREATEDB when it is created
#   install-mode - "node" locks all accounts after the import
#   drop         - literal "drop" drops DB + user and regenerates the password
#
echo "$@" > /tmp/init.args   # debugging aid: record the args this script was invoked with
LOG=/dev/null
# die: echo to stderr, append to log, exit nonzero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
# log: append a line to the log (LOG is /dev/null by default)
function log {
  echo "${1}" >> ${LOG}
}
# Force a consistent locale so createdb/psql behave predictably
export LANG="en_US.UTF-8"
export LANGUAGE="en_US.UTF-8"
export LC_CTYPE="en_US.UTF-8"
export LC_NUMERIC="en_US.UTF-8"
export LC_TIME="en_US.UTF-8"
export LC_COLLATE="en_US.UTF-8"
export LC_MONETARY="en_US.UTF-8"
export LC_MESSAGES="en_US.UTF-8"
export LC_PAPER="en_US.UTF-8"
export LC_NAME="en_US.UTF-8"
export LC_ADDRESS="en_US.UTF-8"
export LC_TELEPHONE="en_US.UTF-8"
export LC_MEASUREMENT="en_US.UTF-8"
export LC_IDENTIFICATION="en_US.UTF-8"
export LC_ALL=en_US.UTF-8
if [[ "$(whoami)" != "postgres" ]] ; then
  echo "Must be run as postgres user"
  exit 1
fi
DB_NAME=${1:?no db name provided}
DB_USER=${2:?no db user provided}
IS_FORK=${3:?no fork argument provided}
INSTALL_MODE=${4:?no install mode provided}
DROP_AND_RECREATE=${5}
BUBBLE_HOME=/home/bubble
BUBBLE_JAR=/home/bubble/current/bubble.jar
if [[ ! -f ${BUBBLE_JAR} ]] ; then
  die "Bubble jar not found: ${BUBBLE_JAR}"
fi
# user_exists: echo "1" if the given postgres user exists, else "0"
function user_exists {
  username="${1}"
  num_users="$(echo "select count(*) from pg_user where usename='${username}'" | psql -qt | egrep -v '^$')"
  if [[ -z "${num_users}" || ${num_users} -eq 0 ]] ; then
    echo "0"
  else
    echo "1"
  fi
}
# db_exists: echo "1" if the given database exists, else "0"
function db_exists {
  dbname="${1}"
  num_dbs="$(echo "select count(*) from pg_database where datname='${dbname}'" | psql -qt | egrep -v '^$')"
  if [[ -z "${num_dbs}" || ${num_dbs} -eq 0 ]] ; then
    echo "0"
  else
    echo "1"
  fi
}
# count_table_rows: echo row count of table $2 in database $1
function count_table_rows {
  dbname="${1}"
  tname="${2}"
  num_rows="$(echo "select count(*) from ${tname}" | psql -qt ${dbname} | egrep -v '^$')"
  if [[ -z "${num_rows}" ]] ; then
    die "count_table_rows: error counting rows for table ${tname}"
  fi
  echo ${num_rows}
}
if [[ ! -z "${DROP_AND_RECREATE}" && "${DROP_AND_RECREATE}" == "drop" ]] ; then
  dropdb ${DB_NAME} || echo "error dropping DB ${DB_NAME} (will continue)"
  dropuser ${DB_USER} || echo "error dropping DB user ${DB_USER} (will continue)"
  uuid > ${BUBBLE_HOME}/.BUBBLE_PG_PASSWORD
fi
if [[ $(user_exists ${DB_USER}) -eq 0 ]] ; then
  log "Creating user ${DB_USER}"
  # quote the tr character classes so the shell cannot glob-expand them
  if [[ "$(echo ${IS_FORK} | tr '[:upper:]' '[:lower:]')" == "true" ]] ; then
    createuser --createdb --no-createrole --no-superuser --no-replication ${DB_USER} || die "Error creating user"
  else
    createuser --no-createdb --no-createrole --no-superuser --no-replication ${DB_USER} || die "Error creating user"
  fi
fi
# Always load the password and (re)apply it to ${DB_USER}. Fixes two bugs:
#  - DB_PASS was previously only set when the user was first created, so the
#    rekey step below ran with an empty --db-password on subsequent runs
#  - the ALTER USER statement previously hard-coded the user "bubble"
#    instead of using ${DB_USER}
DB_PASS="$(cat ${BUBBLE_HOME}/.BUBBLE_PG_PASSWORD)"
echo "ALTER USER ${DB_USER} WITH PASSWORD '${DB_PASS}'" | psql || die "Error setting user password"
if [[ $(db_exists ${DB_NAME}) -eq 0 ]] ; then
  log "Creating DB ${DB_NAME}"
  createdb --encoding=UTF-8 ${DB_NAME} || die "Error creating DB"
fi
# If the account table is empty (or absent), import the seed SQL into a temp
# DB, then re-key the encrypted columns from the temp DB into ${DB_NAME}
# using a freshly generated key.
if [[ $(count_table_rows ${DB_NAME} account 2> /dev/null) -eq 0 ]] ; then
  TEMP_DB="${DB_NAME}_$(uuid | tr -d '-')"
  log "Creating tempDB ${TEMP_DB}"
  createdb --encoding=UTF-8 ${TEMP_DB} || die "Error creating temp DB"
  log "Populating tempDB ${TEMP_DB} with bubble.sql.gz"
  zcat /home/bubble/sql/bubble.sql.gz | psql ${TEMP_DB} || die "Error writing database schema/data"
  DB_KEY="$(cat ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY)"
  TO_KEY="$(uuid)"
  if [[ -z "${TO_KEY}" ]] ; then
    dropdb ${TEMP_DB}
    die "${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY does not exist or is empty"
  fi
  log "Dumping schema from ${TEMP_DB} -> ${DB_NAME}"
  pg_dump --schema-only ${TEMP_DB} | psql ${DB_NAME}
  # log "Rekeying: fromKey=${DB_KEY}, toKey=${TO_KEY}"
  # NOTE: the failure handler uses { ...; } (current shell), not ( ... )
  # (subshell) -- with a subshell, "die" would only exit the subshell and the
  # script would keep running after a failed rekey.
  java -cp ${BUBBLE_JAR} bubble.main.RekeyDatabaseMain \
       --jar ${BUBBLE_JAR} \
       --db-user ${DB_USER} \
       --db-password "${DB_PASS}" \
       --from-db ${TEMP_DB} \
       --from-key "${DB_KEY}" \
       --to-db ${DB_NAME} \
       --to-key "${TO_KEY}" 2>&1 || { dropdb ${TEMP_DB} ; die "Error re-keying database" ; }
  log "Rekey successful, dropping ${TEMP_DB}"
  dropdb ${TEMP_DB}
  log "Saving ${TO_KEY} to ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY"
  echo -n "${TO_KEY}" > ${BUBBLE_HOME}/.BUBBLE_DB_ENCRYPTION_KEY
fi
# Remove node keys/nodes that refer to localhost placeholders
echo "DELETE FROM bubble_node_key WHERE node IN (SELECT uuid FROM bubble_node WHERE ip4='127.0.0.1' OR ip4='' OR ip4 IS NULL)" | psql ${DB_NAME} \
  || die "Error removing bubble_node_keys with remote_host=127.0.0.1"
echo "DELETE FROM bubble_node WHERE ip4='127.0.0.1'" | psql ${DB_NAME} \
  || die "Error removing bubble_nodes with ip4=127.0.0.1"
if [[ "${INSTALL_MODE}" == "node" ]] ; then
  echo "UPDATE account SET locked=true" | psql ${DB_NAME} \
    || die "Error locking accounts"
fi
@@ -0,0 +1,63 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Install ansible role tarballs from ${HOME}/role_tgz into this node's
# LocalStorage area (under automation/roles/ for the network found in
# self_node.json). Must run as the "bubble" user; if started as root it
# re-executes itself via sudo as bubble.
#
SCRIPT="${0}"
SCRIPT_DIR=$(cd $(dirname ${SCRIPT}) && pwd)
LOG=/tmp/$(basename ${0}).log
# die: echo to stderr, log, exit nonzero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}
# log: echo to stdout and append to the log file
function log {
  echo "${1}" | tee -a ${LOG}
}
# NOTE(review): the sudo re-exec does not forward arguments ("${@}") --
# harmless today since this script reads no positional args, but confirm
# if args are ever added
if [[ $(whoami) != "bubble" ]] ; then
  if [[ $(whoami) == "root" ]] ; then
    sudo -H -u bubble ${0}
    exit $?
  fi
  die "${0} must be run as bubble"
fi
# Determine LOCALSTORAGE_BASE_DIR: env var, then bubble.env, then default
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
  if [[ -f "${HOME}/bubble/current/bubble.env" ]] ; then
    LOCALSTORAGE_BASE_DIR=$(cat "${HOME}/bubble/current/bubble.env" | grep -v '^#' | grep LOCALSTORAGE_BASE_DIR | awk -F '=' '{print $2}' | tr -d ' ')
  fi
fi
if [[ -z "${LOCALSTORAGE_BASE_DIR}" ]] ; then
  log "LOCALSTORAGE_BASE_DIR env var not defined, using ${HOME}/.bubble_local_storage"
  LOCALSTORAGE_BASE_DIR="${HOME}/.bubble_local_storage"
fi
# NOTE(review): BUBBLE_JAR is validated here but not otherwise used below --
# presumably required to exist as a sanity check; confirm
if [[ -z "${BUBBLE_JAR}" ]] ; then
  if [[ -f "${HOME}/current/bubble.jar" ]] ; then
    BUBBLE_JAR="${HOME}/current/bubble.jar"
  fi
fi
if [[ -z "${BUBBLE_JAR}" ]] ; then
  die "BUBBLE_JAR env var not set and no jar file found"
fi
ROLE_DIR="${HOME}/role_tgz"
if [[ ! -d "${ROLE_DIR}" ]] ; then
  die "role_tgz dir not found: ${ROLE_DIR}"
fi
NETWORK_UUID="$(cat ${HOME}/self_node.json | jq -r .network)"
# Copy each role tarball into LocalStorage unless already present.
# NOTE(review): the while loop runs in a pipeline subshell, so "die" exits
# only that subshell; because this pipeline is the script's final command,
# its nonzero status still becomes the script's exit status.
find ${ROLE_DIR} -type f -name "*.tgz" | while read role_tgz ; do
  path="automation/roles/$(basename ${role_tgz})"
  dest="${LOCALSTORAGE_BASE_DIR}/${NETWORK_UUID}/${path}"
  if [[ ! -f ${dest} ]] ; then
    mkdir -p $(dirname ${dest}) || die "Error creating destination directory"
    cp ${role_tgz} ${dest} || die "Error copying role archive"
    log "installed role ${role_tgz} -> ${dest}"
  else
    log "role already installed ${role_tgz} -> ${dest}"
  fi
done
@@ -0,0 +1,99 @@ | |||||
# PostgreSQL Client Authentication Configuration File | |||||
# =================================================== | |||||
# | |||||
# Refer to the "Client Authentication" section in the PostgreSQL | |||||
# documentation for a complete description of this file. A short | |||||
# synopsis follows. | |||||
# | |||||
# This file controls: which hosts are allowed to connect, how clients | |||||
# are authenticated, which PostgreSQL user names they can use, which | |||||
# databases they can access. Records take one of these forms: | |||||
# | |||||
# local DATABASE USER METHOD [OPTIONS] | |||||
# host DATABASE USER ADDRESS METHOD [OPTIONS] | |||||
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] | |||||
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] | |||||
# | |||||
# (The uppercase items must be replaced by actual values.) | |||||
# | |||||
# The first field is the connection type: "local" is a Unix-domain | |||||
# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, | |||||
# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a | |||||
# plain TCP/IP socket. | |||||
# | |||||
# DATABASE can be "all", "sameuser", "samerole", "replication", a | |||||
# database name, or a comma-separated list thereof. The "all" | |||||
# keyword does not match "replication". Access to replication | |||||
# must be enabled in a separate record (see example below). | |||||
# | |||||
# USER can be "all", a user name, a group name prefixed with "+", or a | |||||
# comma-separated list thereof. In both the DATABASE and USER fields | |||||
# you can also write a file name prefixed with "@" to include names | |||||
# from a separate file. | |||||
# | |||||
# ADDRESS specifies the set of hosts the record matches. It can be a | |||||
# host name, or it is made up of an IP address and a CIDR mask that is | |||||
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that | |||||
# specifies the number of significant bits in the mask. A host name | |||||
# that starts with a dot (.) matches a suffix of the actual host name. | |||||
# Alternatively, you can write an IP address and netmask in separate | |||||
# columns to specify the set of hosts. Instead of a CIDR-address, you | |||||
# can write "samehost" to match any of the server's own IP addresses, | |||||
# or "samenet" to match any address in any subnet that the server is | |||||
# directly connected to. | |||||
# | |||||
# METHOD can be "trust", "reject", "md5", "password", "gss", "sspi", | |||||
# "ident", "peer", "pam", "ldap", "radius" or "cert". Note that | |||||
# "password" sends passwords in clear text; "md5" is preferred since | |||||
# it sends encrypted passwords. | |||||
# | |||||
# OPTIONS are a set of options for the authentication in the format | |||||
# NAME=VALUE. The available options depend on the different | |||||
# authentication methods -- refer to the "Client Authentication" | |||||
# section in the documentation for a list of which options are | |||||
# available for which authentication methods. | |||||
# | |||||
# Database and user names containing spaces, commas, quotes and other | |||||
# special characters must be quoted. Quoting one of the keywords | |||||
# "all", "sameuser", "samerole" or "replication" makes the name lose | |||||
# its special character, and just match a database or username with | |||||
# that name. | |||||
# | |||||
# This file is read on server startup and when the postmaster receives | |||||
# a SIGHUP signal. If you edit the file on a running system, you have | |||||
# to SIGHUP the postmaster for the changes to take effect. You can | |||||
# use "pg_ctl reload" to do that. | |||||
# Put your actual configuration here | |||||
# ---------------------------------- | |||||
# | |||||
# If you want to allow non-local connections, you need to add more | |||||
# "host" records. In that case you will also need to make PostgreSQL | |||||
# listen on a non-local interface via the listen_addresses | |||||
# configuration parameter, or via the -i or -h command line switches. | |||||
# DO NOT DISABLE! | |||||
# If you change this first entry you will need to make sure that the | |||||
# database superuser can access the database using some other method. | |||||
# Noninteractive access to all databases is required during automatic | |||||
# maintenance (custom daily cronjobs, replication, and similar tasks). | |||||
# | |||||
# Database administrative login by Unix domain socket | |||||
local all postgres peer | |||||
# TYPE DATABASE USER ADDRESS METHOD | |||||
# "local" is for Unix domain socket connections only | |||||
local all all peer | |||||
# IPv4 local connections: | |||||
host all all 127.0.0.1/32 md5 | |||||
# IPv6 local connections: | |||||
host all all ::1/128 md5 | |||||
# Allow replication connections from localhost, by a user with the | |||||
# replication privilege. | |||||
#local replication postgres peer | |||||
#host replication postgres 127.0.0.1/32 md5 | |||||
#host replication postgres ::1/128 md5 |
@@ -0,0 +1,614 @@ | |||||
# ----------------------------- | |||||
# PostgreSQL configuration file | |||||
# ----------------------------- | |||||
# | |||||
# This file consists of lines of the form: | |||||
# | |||||
# name = value | |||||
# | |||||
# (The "=" is optional.) Whitespace may be used. Comments are introduced with | |||||
# "#" anywhere on a line. The complete list of parameter names and allowed | |||||
# values can be found in the PostgreSQL documentation. | |||||
# | |||||
# The commented-out settings shown in this file represent the default values. | |||||
# Re-commenting a setting is NOT sufficient to revert it to the default value; | |||||
# you need to reload the server. | |||||
# | |||||
# This file is read on server startup and when the server receives a SIGHUP | |||||
# signal. If you edit the file on a running system, you have to SIGHUP the | |||||
# server for the changes to take effect, or use "pg_ctl reload". Some | |||||
# parameters, which are marked below, require a server shutdown and restart to | |||||
# take effect. | |||||
# | |||||
# Any parameter can also be given as a command-line option to the server, e.g., | |||||
# "postgres -c log_connections=on". Some parameters can be changed at run time | |||||
# with the "SET" SQL command. | |||||
# | |||||
# Memory units: kB = kilobytes Time units: ms = milliseconds | |||||
# MB = megabytes s = seconds | |||||
# GB = gigabytes min = minutes | |||||
# TB = terabytes h = hours | |||||
# d = days | |||||
#------------------------------------------------------------------------------ | |||||
# FILE LOCATIONS | |||||
#------------------------------------------------------------------------------ | |||||
# The default values of these variables are driven from the -D command-line | |||||
# option or PGDATA environment variable, represented here as ConfigDir. | |||||
data_directory = '/var/lib/postgresql/9.4/main' # use data in another directory | |||||
# (change requires restart) | |||||
hba_file = '/etc/postgresql/9.4/main/pg_hba.conf' # host-based authentication file | |||||
# (change requires restart) | |||||
ident_file = '/etc/postgresql/9.4/main/pg_ident.conf' # ident configuration file | |||||
# (change requires restart) | |||||
# If external_pid_file is not explicitly set, no extra PID file is written. | |||||
external_pid_file = '/var/run/postgresql/9.4-main.pid' # write an extra PID file | |||||
# (change requires restart) | |||||
#------------------------------------------------------------------------------ | |||||
# CONNECTIONS AND AUTHENTICATION | |||||
#------------------------------------------------------------------------------ | |||||
# - Connection Settings - | |||||
listen_addresses = 'localhost,127.0.0.1' # what IP address(es) to listen on; | |||||
# comma-separated list of addresses; | |||||
# defaults to 'localhost'; use '*' for all | |||||
# (change requires restart) | |||||
port = 5432 # (change requires restart) | |||||
max_connections = 197 # (change requires restart) | |||||
# Note: Increasing max_connections costs ~400 bytes of shared memory per | |||||
# connection slot, plus lock space (see max_locks_per_transaction). | |||||
superuser_reserved_connections = 3 # (change requires restart) | |||||
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories | |||||
# (change requires restart) | |||||
#unix_socket_group = '' # (change requires restart) | |||||
#unix_socket_permissions = 0777 # begin with 0 to use octal notation | |||||
# (change requires restart) | |||||
#bonjour = off # advertise server via Bonjour | |||||
# (change requires restart) | |||||
#bonjour_name = '' # defaults to the computer name | |||||
# (change requires restart) | |||||
# - Security and Authentication - | |||||
#authentication_timeout = 1min # 1s-600s | |||||
ssl = true # (change requires restart) | |||||
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers | |||||
# (change requires restart) | |||||
#ssl_prefer_server_ciphers = on # (change requires restart) | |||||
#ssl_ecdh_curve = 'prime256v1' # (change requires restart) | |||||
#ssl_renegotiation_limit = 512MB # amount of data between renegotiations | |||||
ssl_cert_file = '/etc/ssl/certs/ssl-cert-snakeoil.pem' # (change requires restart) | |||||
ssl_key_file = '/etc/ssl/private/ssl-cert-snakeoil.key' # (change requires restart) | |||||
#ssl_ca_file = '' # (change requires restart) | |||||
#ssl_crl_file = '' # (change requires restart) | |||||
#password_encryption = on | |||||
#db_user_namespace = off | |||||
# GSSAPI using Kerberos | |||||
#krb_server_keyfile = '' | |||||
#krb_caseins_users = off | |||||
# - TCP Keepalives - | |||||
# see "man 7 tcp" for details | |||||
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; | |||||
# 0 selects the system default | |||||
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; | |||||
# 0 selects the system default | |||||
#tcp_keepalives_count = 0 # TCP_KEEPCNT; | |||||
# 0 selects the system default | |||||
#------------------------------------------------------------------------------ | |||||
# RESOURCE USAGE (except WAL) | |||||
#------------------------------------------------------------------------------ | |||||
# - Memory - | |||||
shared_buffers = 128MB # min 128kB | |||||
# (change requires restart) | |||||
#huge_pages = try # on, off, or try | |||||
# (change requires restart) | |||||
#temp_buffers = 8MB # min 800kB | |||||
#max_prepared_transactions = 0 # zero disables the feature | |||||
# (change requires restart) | |||||
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory | |||||
# per transaction slot, plus lock space (see max_locks_per_transaction). | |||||
# It is not advisable to set max_prepared_transactions nonzero unless you | |||||
# actively intend to use prepared transactions. | |||||
#work_mem = 4MB # min 64kB | |||||
#maintenance_work_mem = 64MB # min 1MB | |||||
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem | |||||
#max_stack_depth = 2MB # min 100kB | |||||
dynamic_shared_memory_type = posix # the default is the first option | |||||
# supported by the operating system: | |||||
# posix | |||||
# sysv | |||||
# windows | |||||
# mmap | |||||
# use none to disable dynamic shared memory | |||||
# - Disk - | |||||
#temp_file_limit = -1 # limits per-session temp file space | |||||
# in kB, or -1 for no limit | |||||
# - Kernel Resource Usage - | |||||
#max_files_per_process = 1000 # min 25 | |||||
# (change requires restart) | |||||
#shared_preload_libraries = '' # (change requires restart) | |||||
# - Cost-Based Vacuum Delay - | |||||
#vacuum_cost_delay = 0 # 0-100 milliseconds | |||||
#vacuum_cost_page_hit = 1 # 0-10000 credits | |||||
#vacuum_cost_page_miss = 10 # 0-10000 credits | |||||
#vacuum_cost_page_dirty = 20 # 0-10000 credits | |||||
#vacuum_cost_limit = 200 # 1-10000 credits | |||||
# - Background Writer - | |||||
#bgwriter_delay = 200ms # 10-10000ms between rounds | |||||
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round | |||||
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
# - Asynchronous Behavior - | |||||
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching | |||||
#max_worker_processes = 8 | |||||
#------------------------------------------------------------------------------ | |||||
# WRITE AHEAD LOG | |||||
#------------------------------------------------------------------------------ | |||||
# - Settings - | |||||
#wal_level = minimal # minimal, archive, hot_standby, or logical | |||||
# (change requires restart) | |||||
#fsync = on # turns forced synchronization on or off | |||||
#synchronous_commit = on # synchronization level; | |||||
# off, local, remote_write, or on | |||||
#wal_sync_method = fsync # the default is the first option | |||||
# supported by the operating system: | |||||
# open_datasync | |||||
# fdatasync (default on Linux) | |||||
# fsync | |||||
# fsync_writethrough | |||||
# open_sync | |||||
#full_page_writes = on # recover from partial page writes | |||||
#wal_log_hints = off # also do full page writes of non-critical updates | |||||
# (change requires restart) | |||||
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers | |||||
# (change requires restart) | |||||
#wal_writer_delay = 200ms # 1-10000 milliseconds | |||||
#commit_delay = 0 # range 0-100000, in microseconds | |||||
#commit_siblings = 5 # range 1-1000 | |||||
# - Checkpoints - | |||||
#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each | |||||
#checkpoint_timeout = 5min # range 30s-1h | |||||
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 | |||||
#checkpoint_warning = 30s # 0 disables | |||||
# - Archiving - | |||||
#archive_mode = off # allows archiving to be done | |||||
# (change requires restart) | |||||
#archive_command = '' # command to use to archive a logfile segment | |||||
# placeholders: %p = path of file to archive | |||||
# %f = file name only | |||||
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' | |||||
#archive_timeout = 0 # force a logfile segment switch after this | |||||
# number of seconds; 0 disables | |||||
#------------------------------------------------------------------------------ | |||||
# REPLICATION | |||||
#------------------------------------------------------------------------------ | |||||
# - Sending Server(s) - | |||||
# Set these on the master and on any standby that will send replication data. | |||||
#max_wal_senders = 0 # max number of walsender processes | |||||
# (change requires restart) | |||||
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables | |||||
#wal_sender_timeout = 60s # in milliseconds; 0 disables | |||||
#max_replication_slots = 0 # max number of replication slots | |||||
# (change requires restart) | |||||
# - Master Server - | |||||
# These settings are ignored on a standby server. | |||||
#synchronous_standby_names = '' # standby servers that provide sync rep | |||||
# comma-separated list of application_name | |||||
# from standby(s); '*' = all | |||||
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed | |||||
# - Standby Servers - | |||||
# These settings are ignored on a master server. | |||||
#hot_standby = off # "on" allows queries during recovery | |||||
# (change requires restart) | |||||
#max_standby_archive_delay = 30s # max delay before canceling queries | |||||
# when reading WAL from archive; | |||||
# -1 allows indefinite delay | |||||
#max_standby_streaming_delay = 30s # max delay before canceling queries | |||||
# when reading streaming WAL; | |||||
# -1 allows indefinite delay | |||||
#wal_receiver_status_interval = 10s # send replies at least this often | |||||
# 0 disables | |||||
#hot_standby_feedback = off # send info from standby to prevent | |||||
# query conflicts | |||||
#wal_receiver_timeout = 60s # time that receiver waits for | |||||
# communication from master | |||||
# in milliseconds; 0 disables | |||||
#------------------------------------------------------------------------------ | |||||
# QUERY TUNING | |||||
#------------------------------------------------------------------------------ | |||||
# - Planner Method Configuration - | |||||
#enable_bitmapscan = on | |||||
#enable_hashagg = on | |||||
#enable_hashjoin = on | |||||
#enable_indexscan = on | |||||
#enable_indexonlyscan = on | |||||
#enable_material = on | |||||
#enable_mergejoin = on | |||||
#enable_nestloop = on | |||||
#enable_seqscan = on | |||||
#enable_sort = on | |||||
#enable_tidscan = on | |||||
# - Planner Cost Constants - | |||||
#seq_page_cost = 1.0 # measured on an arbitrary scale | |||||
#random_page_cost = 4.0 # same scale as above | |||||
#cpu_tuple_cost = 0.01 # same scale as above | |||||
#cpu_index_tuple_cost = 0.005 # same scale as above | |||||
#cpu_operator_cost = 0.0025 # same scale as above | |||||
#effective_cache_size = 4GB | |||||
# - Genetic Query Optimizer - | |||||
#geqo = on | |||||
#geqo_threshold = 12 | |||||
#geqo_effort = 5 # range 1-10 | |||||
#geqo_pool_size = 0 # selects default based on effort | |||||
#geqo_generations = 0 # selects default based on effort | |||||
#geqo_selection_bias = 2.0 # range 1.5-2.0 | |||||
#geqo_seed = 0.0 # range 0.0-1.0 | |||||
# - Other Planner Options - | |||||
#default_statistics_target = 100 # range 1-10000 | |||||
#constraint_exclusion = partition # on, off, or partition | |||||
#cursor_tuple_fraction = 0.1 # range 0.0-1.0 | |||||
#from_collapse_limit = 8 | |||||
#join_collapse_limit = 8 # 1 disables collapsing of explicit | |||||
# JOIN clauses | |||||
#------------------------------------------------------------------------------ | |||||
# ERROR REPORTING AND LOGGING | |||||
#------------------------------------------------------------------------------ | |||||
# - Where to Log - | |||||
#log_destination = 'stderr' # Valid values are combinations of | |||||
# stderr, csvlog, syslog, and eventlog, | |||||
# depending on platform. csvlog | |||||
# requires logging_collector to be on. | |||||
# This is used when logging to stderr: | |||||
#logging_collector = off # Enable capturing of stderr and csvlog | |||||
# into log files. Required to be on for | |||||
# csvlogs. | |||||
# (change requires restart) | |||||
# These are only used if logging_collector is on: | |||||
#log_directory = 'pg_log' # directory where log files are written, | |||||
# can be absolute or relative to PGDATA | |||||
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, | |||||
# can include strftime() escapes | |||||
#log_file_mode = 0600 # creation mode for log files, | |||||
# begin with 0 to use octal notation | |||||
#log_truncate_on_rotation = off # If on, an existing log file with the | |||||
# same name as the new log file will be | |||||
# truncated rather than appended to. | |||||
# But such truncation only occurs on | |||||
# time-driven rotation, not on restarts | |||||
# or size-driven rotation. Default is | |||||
# off, meaning append to existing files | |||||
# in all cases. | |||||
#log_rotation_age = 1d # Automatic rotation of logfiles will | |||||
# happen after that time. 0 disables. | |||||
#log_rotation_size = 10MB # Automatic rotation of logfiles will | |||||
# happen after that much log output. | |||||
# 0 disables. | |||||
# These are relevant when logging to syslog: | |||||
#syslog_facility = 'LOCAL0' | |||||
#syslog_ident = 'postgres' | |||||
# This is only relevant when logging to eventlog (win32): | |||||
#event_source = 'PostgreSQL' | |||||
# - When to Log - | |||||
#client_min_messages = notice # values in order of decreasing detail: | |||||
# debug5 | |||||
# debug4 | |||||
# debug3 | |||||
# debug2 | |||||
# debug1 | |||||
# log | |||||
# notice | |||||
# warning | |||||
# error | |||||
#log_min_messages = warning # values in order of decreasing detail: | |||||
# debug5 | |||||
# debug4 | |||||
# debug3 | |||||
# debug2 | |||||
# debug1 | |||||
# info | |||||
# notice | |||||
# warning | |||||
# error | |||||
# log | |||||
# fatal | |||||
# panic | |||||
#log_min_error_statement = error # values in order of decreasing detail: | |||||
# debug5 | |||||
# debug4 | |||||
# debug3 | |||||
# debug2 | |||||
# debug1 | |||||
# info | |||||
# notice | |||||
# warning | |||||
# error | |||||
# log | |||||
# fatal | |||||
# panic (effectively off) | |||||
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements | |||||
# and their durations, > 0 logs only | |||||
# statements running at least this number | |||||
# of milliseconds | |||||
# - What to Log - | |||||
#debug_print_parse = off | |||||
#debug_print_rewritten = off | |||||
#debug_print_plan = off | |||||
#debug_pretty_print = on | |||||
#log_checkpoints = off | |||||
#log_connections = off | |||||
#log_disconnections = off | |||||
#log_duration = off | |||||
#log_error_verbosity = default # terse, default, or verbose messages | |||||
#log_hostname = off | |||||
log_line_prefix = '%t [%p-%l] %q%u@%d ' # special values: | |||||
# %a = application name | |||||
# %u = user name | |||||
# %d = database name | |||||
# %r = remote host and port | |||||
# %h = remote host | |||||
# %p = process ID | |||||
# %t = timestamp without milliseconds | |||||
# %m = timestamp with milliseconds | |||||
# %i = command tag | |||||
# %e = SQL state | |||||
# %c = session ID | |||||
# %l = session line number | |||||
# %s = session start timestamp | |||||
# %v = virtual transaction ID | |||||
# %x = transaction ID (0 if none) | |||||
# %q = stop here in non-session | |||||
# processes | |||||
# %% = '%' | |||||
# e.g. '<%u%%%d> ' | |||||
#log_lock_waits = off # log lock waits >= deadlock_timeout | |||||
#log_statement = 'none' # none, ddl, mod, all | |||||
#log_temp_files = -1 # log temporary files equal or larger | |||||
# than the specified size in kilobytes; | |||||
# -1 disables, 0 logs all temp files | |||||
log_timezone = 'localtime' | |||||
#------------------------------------------------------------------------------ | |||||
# RUNTIME STATISTICS | |||||
#------------------------------------------------------------------------------ | |||||
# - Query/Index Statistics Collector - | |||||
#track_activities = on | |||||
#track_counts = on | |||||
#track_io_timing = off | |||||
#track_functions = none # none, pl, all | |||||
#track_activity_query_size = 1024 # (change requires restart) | |||||
#update_process_title = on | |||||
stats_temp_directory = '/var/run/postgresql/9.4-main.pg_stat_tmp' | |||||
# - Statistics Monitoring - | |||||
#log_parser_stats = off | |||||
#log_planner_stats = off | |||||
#log_executor_stats = off | |||||
#log_statement_stats = off | |||||
#------------------------------------------------------------------------------ | |||||
# AUTOVACUUM PARAMETERS | |||||
#------------------------------------------------------------------------------ | |||||
#autovacuum = on # Enable autovacuum subprocess? 'on' | |||||
# requires track_counts to also be on. | |||||
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and | |||||
# their durations, > 0 logs only | |||||
# actions running at least this number | |||||
# of milliseconds. | |||||
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses | |||||
# (change requires restart) | |||||
#autovacuum_naptime = 1min # time between autovacuum runs | |||||
#autovacuum_vacuum_threshold = 50 # min number of row updates before | |||||
# vacuum | |||||
#autovacuum_analyze_threshold = 50 # min number of row updates before | |||||
# analyze | |||||
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum | |||||
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze | |||||
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum | |||||
# (change requires restart) | |||||
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age | |||||
# before forced vacuum | |||||
# (change requires restart) | |||||
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for | |||||
# autovacuum, in milliseconds; | |||||
# -1 means use vacuum_cost_delay | |||||
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for | |||||
# autovacuum, -1 means use | |||||
# vacuum_cost_limit | |||||
#------------------------------------------------------------------------------ | |||||
# CLIENT CONNECTION DEFAULTS | |||||
#------------------------------------------------------------------------------ | |||||
# - Statement Behavior - | |||||
#search_path = '"$user",public' # schema names | |||||
#default_tablespace = '' # a tablespace name, '' uses the default | |||||
#temp_tablespaces = '' # a list of tablespace names, '' uses | |||||
# only default tablespace | |||||
#check_function_bodies = on | |||||
#default_transaction_isolation = 'read committed' | |||||
#default_transaction_read_only = off | |||||
#default_transaction_deferrable = off | |||||
#session_replication_role = 'origin' | |||||
#statement_timeout = 0 # in milliseconds, 0 is disabled | |||||
#lock_timeout = 0 # in milliseconds, 0 is disabled | |||||
#vacuum_freeze_min_age = 50000000 | |||||
#vacuum_freeze_table_age = 150000000 | |||||
#vacuum_multixact_freeze_min_age = 5000000 | |||||
#vacuum_multixact_freeze_table_age = 150000000 | |||||
#bytea_output = 'hex' # hex, escape | |||||
#xmlbinary = 'base64' | |||||
#xmloption = 'content' | |||||
# - Locale and Formatting - | |||||
datestyle = 'iso, mdy' | |||||
#intervalstyle = 'postgres' | |||||
timezone = 'localtime' | |||||
#timezone_abbreviations = 'Default' # Select the set of available time zone | |||||
# abbreviations. Currently, there are | |||||
# Default | |||||
# Australia (historical usage) | |||||
# India | |||||
# You can create your own file in | |||||
# share/timezonesets/. | |||||
#extra_float_digits = 0 # min -15, max 3 | |||||
#client_encoding = sql_ascii # actually, defaults to database | |||||
# encoding | |||||
# These settings are initialized by initdb, but they can be changed. | |||||
lc_messages = 'en_US.UTF-8' # locale for system error message | |||||
# strings | |||||
lc_monetary = 'en_US.UTF-8' # locale for monetary formatting | |||||
lc_numeric = 'en_US.UTF-8' # locale for number formatting | |||||
lc_time = 'en_US.UTF-8' # locale for time formatting | |||||
# default configuration for text search | |||||
default_text_search_config = 'pg_catalog.english' | |||||
# - Other Defaults - | |||||
#dynamic_library_path = '$libdir' | |||||
#local_preload_libraries = '' | |||||
#session_preload_libraries = '' | |||||
#------------------------------------------------------------------------------ | |||||
# LOCK MANAGEMENT | |||||
#------------------------------------------------------------------------------ | |||||
#deadlock_timeout = 1s | |||||
#max_locks_per_transaction = 64 # min 10 | |||||
# (change requires restart) | |||||
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are | |||||
# max_locks_per_transaction * (max_connections + max_prepared_transactions) | |||||
# lock table slots. | |||||
#max_pred_locks_per_transaction = 64 # min 10 | |||||
# (change requires restart) | |||||
#------------------------------------------------------------------------------ | |||||
# VERSION/PLATFORM COMPATIBILITY | |||||
#------------------------------------------------------------------------------ | |||||
# - Previous PostgreSQL Versions - | |||||
#array_nulls = on | |||||
#backslash_quote = safe_encoding # on, off, or safe_encoding | |||||
#default_with_oids = off | |||||
#escape_string_warning = on | |||||
#lo_compat_privileges = off | |||||
#quote_all_identifiers = off | |||||
#sql_inheritance = on | |||||
#standard_conforming_strings = on | |||||
#synchronize_seqscans = on | |||||
# - Other Platforms and Clients - | |||||
#transform_null_equals = off | |||||
#------------------------------------------------------------------------------ | |||||
# ERROR HANDLING | |||||
#------------------------------------------------------------------------------ | |||||
#exit_on_error = off # terminate session on any error? | |||||
#restart_after_crash = on # reinitialize after backend crash? | |||||
#------------------------------------------------------------------------------ | |||||
# CONFIG FILE INCLUDES | |||||
#------------------------------------------------------------------------------ | |||||
# These options allow settings to be loaded from files other than the | |||||
# default postgresql.conf. | |||||
#include_dir = 'conf.d' # include files ending in '.conf' from | |||||
# directory 'conf.d' | |||||
#include_if_exists = 'exists.conf' # include file only if it exists | |||||
#include = 'special.conf' # include file | |||||
#------------------------------------------------------------------------------ | |||||
# CUSTOMIZED OPTIONS | |||||
#------------------------------------------------------------------------------ | |||||
# Add settings for extensions here |
@@ -0,0 +1,11 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Create a file containing a random UUID with the given owner/group and mode 660,
# unless the file already exists. Usage: <script> <file> <owner> <group>
file="${1:?no file provided}"
owner="${2:?no owner provided}"
group="${3:?no group provided}"
if [[ ! -f "${file}" ]] ; then
  # chmod/chown BEFORE writing the uuid, so the secret never exists with open permissions.
  # All expansions are quoted so paths/names containing spaces cannot word-split.
  touch "${file}" && chmod 660 "${file}" && chown "${owner}" "${file}" && chgrp "${group}" "${file}" && uuid | tr -d '\n' > "${file}"
fi
@@ -0,0 +1,41 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Daemon: watches a bubble-owned marker file and, whenever it becomes newer than
# the root-owned marker, re-runs refresh_bubble_ssh_keys.sh. Intended to run under
# supervisord as root. Activity is logged to ${LOG}.
LOG=/tmp/bubble.ssh_keys_monitor.log

# die <message>: write message to stderr and the log, then exit non-zero
function die {
  echo 1>&2 "${1}"
  log "${1}"
  exit 1
}

# log <message>: append a timestamped line to the log file
function log {
  echo "$(date): ${1}" >> "${LOG}"
}

SSH_KEY_BASE=/root/.ssh
if [[ ! -d "${SSH_KEY_BASE}" ]] ; then
  die "SSH key directory ${SSH_KEY_BASE} not found"
fi

# The bubble user touches BUBBLE_KEY_MARKER to request a refresh;
# ROOT_KEY_MARKER records when the last refresh was performed.
BUBBLE_KEY_MARKER=/home/bubble/.refresh_ssh_keys
ROOT_KEY_MARKER="${SSH_KEY_BASE}/.refresh_ssh_keys"

if [[ ! -f "${BUBBLE_KEY_MARKER}" ]] ; then
  touch "${BUBBLE_KEY_MARKER}" && chown bubble "${BUBBLE_KEY_MARKER}"
fi
if [[ ! -f "${ROOT_KEY_MARKER}" ]] ; then
  touch "${ROOT_KEY_MARKER}"
fi

log "Watching marker file ${BUBBLE_KEY_MARKER} ..."
while : ; do
  # Compare mtimes; quote the command substitutions so an empty stat result
  # (e.g. marker deleted mid-loop) cannot break the numeric test via word-splitting
  if [[ "$(stat -c %Y "${BUBBLE_KEY_MARKER}")" -gt "$(stat -c %Y "${ROOT_KEY_MARKER}")" ]] ; then
    touch "${ROOT_KEY_MARKER}"
    # brief settle delay so a writer finishing up is not caught mid-update
    sleep 5s
    log "Refreshing Bubble SSH keys..."
    /usr/local/sbin/refresh_bubble_ssh_keys.sh && log "Bubble SSH keys successfully refreshed" || log "Error refreshing Bubble SSH keys"
  fi
  sleep 10s
done
@@ -0,0 +1,5 @@ | |||||
# supervisord program: SSH-key refresh monitor daemon.
# stdout/stderr are discarded; the script writes its own log
# to /tmp/bubble.ssh_keys_monitor.log.
[program:refresh_bubble_ssh_keys_monitor]
stdout_logfile = /dev/null
stderr_logfile = /dev/null
command=/usr/local/sbin/refresh_bubble_ssh_keys_monitor.sh
@@ -0,0 +1,102 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Installs the Bubble API server onto a node: JVM/redis/tools, PostgreSQL,
# the bubble user and directory layout, the application jar, helper scripts,
# and the SSH-key refresh monitor script.
- name: Install OpenJDK 11 JRE (headless), redis, uuid and jq
  apt:
    name: [ 'openjdk-11-jre-headless', 'redis', 'uuid', 'jq', 'zip' ]
    state: present
    update_cache: yes
- import_tasks: postgresql.yml
- name: Create bubble user
  user:
    name: bubble
    comment: bubble user
    shell: /bin/bash
    system: yes
    home: /home/bubble
- name: Creates bubble API version dir
  file:
    path: /home/bubble/api
    owner: bubble
    group: bubble
    mode: 0555
    state: directory
# logs dir is group root (mode 0770) so root-owned services can also write here
- name: Creates bubble logs dir
  file:
    path: /home/bubble/logs
    owner: bubble
    group: root
    mode: 0770
    state: directory
- name: Install bubble jar
  copy:
    src: bubble.jar
    dest: /home/bubble/api/bubble.jar
    owner: bubble
    group: bubble
    mode: 0444
# Extract the static web site that is bundled inside the jar under site/
- name: Unpack site files
  shell: cd /home/bubble && unzip -u /home/bubble/api/bubble.jar 'site/*' && chown -R bubble site
- name: Install helper scripts
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: root
    mode: 0555
  with_items:
    - "bsql.sh"
    - "random_password.sh"
    - "init_roles.sh"
- name: Install standard bubble scripts
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/"
    owner: root
    group: root
    mode: 0555
  with_fileglob:
    - "scripts/*"
- name: Link current version to the one we just installed
  file:
    src: /home/bubble/api
    dest: /home/bubble/current
    owner: bubble
    group: bubble
    state: link
# group postgres (mode 0550) so the DB initializer can read SQL dumps from here
- name: Creates bubble SQL dir
  file:
    path: /home/bubble/sql
    owner: bubble
    group: postgres
    mode: 0550
    state: directory
- name: Install DB initializer
  copy:
    src: "{{ item }}"
    dest: "/usr/local/bin/{{ item }}"
    owner: root
    group: postgres
    mode: 0550
  with_items:
    - init_bubble_db.sh
# The monitor runs as root (mode 0500) from /usr/local/sbin
- name: Install refresh_bubble_ssh_keys monitor
  copy:
    src: "refresh_bubble_ssh_keys_monitor.sh"
    dest: "/usr/local/sbin/refresh_bubble_ssh_keys_monitor.sh"
    owner: root
    group: root
    mode: 0500
@@ -0,0 +1,36 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Installs PostgreSQL 10 and its configuration files.
- name: Install PostgreSQL
  apt:
    name: [ 'postgresql-10', 'libpq-dev', 'python-psycopg2' ]
    state: present
    update_cache: yes
- name: Install PostgreSQL files
  copy:
    src: "{{ item }}"
    dest: /etc/postgresql/10/main/
    owner: postgres
    group: postgres
    mode: 0400
  with_items:
    - postgresql.conf
    - pg_hba.conf
# When PostgreSQL restarts after a reboot, for some weird reason it looks in the 9.4 dir (instead of 10) for files
- name: Symlink /var/lib/postgresql/9.4 -> /var/lib/postgresql/10
  file:
    src: /var/lib/postgresql/10
    dest: /var/lib/postgresql/9.4
    owner: root
    group: root
    state: link
# Same workaround for the config directory
- name: Symlink /etc/postgresql/9.4 -> /etc/postgresql/10
  file:
    src: /etc/postgresql/10
    dest: /etc/postgresql/9.4
    owner: root
    group: root
    state: link
@@ -0,0 +1,35 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Copy the mitmproxy CA certificates (named <cert-base>-ca-cert.*) into
# /home/bubble/cacerts for serving to clients, and keep a private backup
# in /home/bubble/mitm_certs. Usage: <script> <cert-base>

# die <message>: print to stderr and exit non-zero
function die {
  echo 1>&2 "${1}"
  exit 1
}

# Validate the argument FIRST, before any permissions are changed,
# so a missing arg does not leave the cert dirs half-modified.
CERT_BASE="${1:?no cert base provided}"
MITM_BASE_NAME="${CERT_BASE}-ca"

# Lock down ownership/permissions on the mitmproxy cert directory
MITM_CERTS=/home/mitmproxy/.mitmproxy
chown -R mitmproxy "${MITM_CERTS}" || die "Error setting ownership on ${MITM_CERTS}"
chgrp -R root "${MITM_CERTS}" || die "Error setting group on ${MITM_CERTS}"
chmod 750 "${MITM_CERTS}" || die "Error setting permissions on ${MITM_CERTS}"
chmod -R 440 "${MITM_CERTS}"/* || die "Error setting permissions on ${MITM_CERTS} files"

# World-readable copies for download by clients installing the CA cert
CERTS_DIR=/home/bubble/cacerts
mkdir -p "${CERTS_DIR}" || die "Error creating cacerts dir"
cp "${MITM_CERTS}/${MITM_BASE_NAME}-cert.pem" "${CERTS_DIR}" || die "Error copying pem cert"
cp "${MITM_CERTS}/${MITM_BASE_NAME}-cert.pem.crt" "${CERTS_DIR}/${MITM_BASE_NAME}-cert.crt" || die "Error copying crt cert"
cp "${MITM_CERTS}/${MITM_BASE_NAME}-cert.p12" "${CERTS_DIR}" || die "Error copying p12 cert"
cp "${MITM_CERTS}/${MITM_BASE_NAME}-cert.cer" "${CERTS_DIR}" || die "Error copying cer cert"
chown -R bubble "${CERTS_DIR}" || die "Error setting permissions on cacerts dir"
chmod 755 "${CERTS_DIR}" || die "Error setting permissions on ${CERTS_DIR}"
chmod -R 444 "${CERTS_DIR}"/* || die "Error setting permissions on ${CERTS_DIR} files"

# Private backup of ALL mitm certs (including keys), readable only by bubble
CERTS_BACKUP=/home/bubble/mitm_certs
mkdir -p "${CERTS_BACKUP}" || die "Error creating mitm_certs dir"
chmod 700 "${CERTS_BACKUP}" || die "Error setting permissions on mitm_certs dir"
cp "${MITM_CERTS}"/* "${CERTS_BACKUP}" || die "Error backing up mitm_certs"
chmod -R 400 "${CERTS_BACKUP}"/* || die "Error setting permissions on mitm_certs backup"
chown -R bubble "${CERTS_BACKUP}" || die "Error settings ownership of mitm_certs dir"
@@ -0,0 +1,5 @@ | |||||
# supervisord program: the Bubble node manager daemon.
# Logs go to the bubble user's logs directory.
[program:nodemanager]
stdout_logfile = /home/bubble/logs/nodemanager-out.log
stderr_logfile = /home/bubble/logs/nodemanager-err.log
command=/usr/sbin/bubble-nodemanager
@@ -0,0 +1,36 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Final setup steps: first-time marker, cert-copy helper (nodes only),
# and the nodemanager daemon under supervisord.
# Marker file tells the API this is a fresh install (skipped when restoring)
- name: Touch first-time setup file
  shell: su - bubble bash -c "if [[ ! -f /home/bubble/first_time_marker ]] ; then echo -n install > /home/bubble/first_time_marker ; fi"
  when: restore_key is not defined
- name: Install copy_certs_to_bubble.sh helper
  copy:
    src: "copy_certs_to_bubble.sh"
    dest: /usr/local/bin/copy_certs_to_bubble.sh
    owner: bubble
    group: root
    mode: 0550
  when: install_type == 'node'
- name: Install bubble-nodemanager
  copy:
    src: "bubble-nodemanager"
    dest: /usr/sbin/bubble-nodemanager
    owner: root
    group: root
    mode: 0500
- name: Install bubble-nodemanager supervisor conf file
  copy:
    src: "supervisor_bubble_nodemanager.conf"
    dest: /etc/supervisor/conf.d/nodemanager.conf
# We cannot receive notifications until nginx is running, so start bubble API as the very last step
- name: Ensure bubble_nodemanager is started
  supervisorctl:
    name: '{{ item }}'
    state: restarted
  with_items:
    - nodemanager
@@ -0,0 +1,24 @@ | |||||
# GNU screen configuration.
# Use Ctrl-J (instead of the default Ctrl-A) as the command/escape key
bindkey -d ^J command
bind "\011" windows
escape ^Jj
defescape ^Jj
# Region/window navigation shortcuts
bind o focus
bind , prev
bind . next
bind k remove
bind u focus down
bind i focus up
bind t focus top
bind b focus bottom
# Disable the visual bell
vbell off
# hardstatus on
# hardstatus alwayslastline
defscrollback 50000
# Make it a login shell -- needed for rvm (Ruby)
shell -${SHELL}
term xterm-256color
@@ -0,0 +1,35 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Base node setup: package updates, common tooling, and fail2ban.
- name: Update packages
  apt:
    update_cache: yes
# ufw is removed because the firewall role manages iptables directly
- name: Remove ufw
  apt:
    name: ufw
    state: absent
    update_cache: yes
- name: Upgrade packages
  apt:
    update_cache: yes
    upgrade: yes
- name: Install common packages
  apt:
    name: [ 'ntp', 'unzip', 'safe-rm', 'supervisor', 'emacs-nox', 'screen', 'xtail', 'fail2ban' ]
    state: present
    update_cache: yes
- name: Install screenrc file
  copy:
    src: dot-screenrc
    dest: /root/.screenrc
- name: Start common services
  service:
    name: '{{ item }}'
    state: restarted
  with_items:
    - fail2ban
@@ -0,0 +1,7 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Default firewall feature flags; the firewall tasks consult these to decide
# which inbound rules to install. Override per-node to close a class of traffic.
fw_enable_http: true
fw_enable_admin: true
fw_enable_dns: true
fw_enable_ssh: true
@@ -0,0 +1,182 @@ | |||||
#!/usr/bin/python3
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Keeps the local iptables INPUT chain in sync with a JSON file that lists
# peer node IPs and the ports they may reach (see BubblePeers.monitor below).
import json
import logging
import os
import sys
import time
import subprocess

logger = logging.getLogger(__name__)
# NOTE(review): no handler is attached here; INFO messages are dropped unless
# the entry point configures logging — confirm against the __main__ block
logger.setLevel(logging.INFO)

# Shape used when the peers file does not exist yet: no peers, no open ports
EMPTY_PEERS = {'peers': [], 'ports': []}
class PeerPort(object):
    """A protocol/port pair parsed from a 'proto:port' string.

    When no protocol prefix is present, 'tcp' is assumed. Both attributes
    are kept as strings.
    """

    def __init__(self, port):
        head, sep, tail = port.partition(':')
        if sep:
            self.proto = head
            self.port = tail
        else:
            self.proto = 'tcp'
            self.port = port

    def __str__(self):
        return '%s:%s' % (self.proto, self.port)
def find_peers(port):
    """Return the list of peer source IPs that currently have an iptables
    INPUT rule allowing the given PeerPort.

    Parses `iptables -vnL INPUT` output; columns are:
    pkts bytes target prot opt in out source destination [match...]
    """
    out = subprocess.run(['iptables', '-vnL', 'INPUT'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    peers = []
    for line in out.stdout.decode('utf-8').split('\n'):
        line = line.strip()
        if len(line) == 0 or line.startswith('Chain ') or line.startswith('pkts '):
            continue
        # BUGFIX: the original iterated `for parts in line.split(' ')`, making
        # `parts` a single token (so parts[0] was a character). Split once.
        parts = line.split()
        if len(parts) < 11:
            # rule has no match extension (no 'dpt:' column) - cannot be ours
            continue
        proto = parts[3]
        if proto != port.proto:
            continue
        source = parts[7]
        if source == '0.0.0.0/0':
            # wildcard-source rules are not peer rules
            continue
        if parts[9] != port.proto:
            continue
        if parts[10].startswith('dpt:'):
            # BUGFIX: compare as strings; PeerPort.port is a string, so the
            # original int(...) == port.port comparison never matched
            dest_port = parts[10][len('dpt:'):]
            if dest_port == str(port.port):
                peers.append(source)
    return peers
def add_peers(peers, port):
    """Insert an ACCEPT rule for each peer IP on the given PeerPort.

    Rules are inserted just above the final (drop) rules of the INPUT chain.
    Raises ValueError when the chain listing is too short to compute a safe
    insertion point.
    """
    out = subprocess.run(['iptables', '-vnL', 'INPUT'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    lines = out.stdout.decode('utf-8').split('\n')
    # insert above the last rule (listing has 2 header lines; last entry is blank)
    insert_at = len(lines) - 2
    if insert_at < 2:
        raise ValueError('add_peers: insert_at was < 2: ' + str(insert_at))
    for peer in peers:
        # BUGFIX: `"on port " + port` raised TypeError (PeerPort is not a str)
        logger.info("add_peers: allowing peer: " + peer + " on port " + str(port))
        subprocess.run(['iptables', '-I', 'INPUT', str(insert_at),
                        '-p', port.proto, '-s', peer + '/32',
                        '--dport', port.port, '-j', 'ACCEPT'])
        logger.info("add_peers: allowed peer: " + peer + " on port " + str(port))
def remove_peers(peers, port):
    """Remove the iptables ACCEPT rule for every listed peer on this port."""
    for candidate in peers:
        remove_peer(candidate, port)
def remove_peer(peer, port):
    """Delete the first iptables INPUT rule allowing `peer` on `port`.

    Returns True when a matching rule was found and deleted, False otherwise.
    """
    out = subprocess.run(['iptables', '-vnL', 'INPUT'],
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    index = 0  # 1-based rule position, as expected by `iptables -D INPUT <n>`
    for line in out.stdout.decode('utf-8').split('\n'):
        line = line.strip()
        if len(line) == 0 or line.startswith('Chain ') or line.startswith('pkts '):
            continue
        index = index + 1
        # BUGFIX: the original iterated `for parts in line.split(' ')`, making
        # `parts` a single token. Split the whole rule line once instead.
        # Columns: pkts bytes target prot opt in out source destination [match...]
        parts = line.split()
        if len(parts) < 11:
            continue
        if parts[3] != port.proto:
            continue
        source = parts[7]
        # `iptables -n` may print a /32 source as a bare IP (no mask);
        # accept both forms — TODO confirm across iptables versions
        if source != peer and not source.startswith(peer + '/32'):
            continue
        if parts[9] != port.proto:
            continue
        if parts[10].startswith('dpt:'):
            # BUGFIX: compare as strings; PeerPort.port is a string, so the
            # original int(...) == port.port comparison never matched
            dest_port = parts[10][len('dpt:'):]
            if dest_port == str(port.port):
                # BUGFIX: `"on port " + port` raised TypeError (PeerPort is not a str)
                logger.info("remove_peer: removing peer: " + peer + " on port " + str(port))
                subprocess.run(['iptables', '-D', 'INPUT', str(index)],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
                return True
    return False
class BubblePeers(object):
    """Tracks the peers JSON file and reconciles iptables rules with it.

    The peers file has the shape {'peers': [ip, ...], 'ports': ['proto:port', ...]}.
    self_path points to a JSON description of this node (loaded for reference).
    """

    def __init__(self, peer_path, self_path):
        self.peer_path = peer_path
        if os.path.exists(peer_path):
            self.last_modified = os.path.getmtime(self.peer_path)
        else:
            self.last_modified = 0
        # mtime of the peers file when rules were last applied; None = never
        self.last_update = None
        self.peers = []
        self.ports = []
        self.self_path = self_path
        self.self_node = {}

    def load_peers(self):
        """(Re)load peers and ports from the peers file; missing file = empty."""
        if os.path.exists(self.peer_path):
            with open(self.peer_path) as f:
                val = json.load(f)
        else:
            val = EMPTY_PEERS
        self.peers = val['peers']
        self.ports = []
        for port in val['ports']:
            self.ports.append(PeerPort(port))

    def load_self(self):
        """Load this node's own JSON descriptor, if present."""
        if os.path.exists(self.self_path):
            with open(self.self_path) as f:
                self.self_node = json.load(f)

    def monitor(self):
        """One monitoring pass: if the peers file changed since the last pass,
        reload it and add/remove iptables rules so the INPUT chain matches."""
        self.load_self()
        if os.path.exists(self.peer_path):
            self.last_modified = os.path.getmtime(self.peer_path)
        if self.last_update is None or self.last_update < self.last_modified:
            self.load_peers()
            # BUGFIX: record the mtime we just applied. The original never set
            # last_update, so this guard was always true and a full resync
            # (plus an extra redundant load_peers) ran on every cycle.
            self.last_update = self.last_modified
            for port in self.ports:
                peers_on_port = find_peers(port)
                # rules present for peers no longer listed -> remove
                peers_to_remove = [p for p in peers_on_port if p not in self.peers]
                # listed peers with no rule yet -> add
                peers_to_add = [p for p in self.peers if p not in peers_on_port]
                remove_peers(peers_to_remove, port)
                add_peers(peers_to_add, port)
if __name__ == "__main__":
    # BUGFIX: attach a handler so this module's INFO messages are actually
    # emitted (supervisord captures stdout/stderr); without basicConfig the
    # bare logger drops everything below WARNING.
    logging.basicConfig(level=logging.INFO)
    # argv: <peers-json-path> <self-node-json-path> <check-interval-seconds>
    peers = BubblePeers(sys.argv[1], sys.argv[2])
    interval = int(sys.argv[3])
    try:
        while True:
            peers.monitor()
            time.sleep(interval)
    except Exception as e:
        logger.error("Unexpected error: " + repr(e))
@@ -0,0 +1,5 @@ | |||||
# supervisord program: peer firewall manager; rechecks peers.json every 60s.
[program:bubble_peer_manager]
stdout_logfile = /var/log/bubble_peer_manager-out.log
stderr_logfile = /var/log/bubble_peer_manager-err.log
command=bash -c "/usr/local/bin/bubble_peer_manager.py /home/bubble/peers.json /home/bubble/self_node.json 60"
@@ -0,0 +1,121 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Resets iptables to a clean state, removes leftover ufw chains, installs the
# peer port manager, then persists rules for use by netfilter-persistent.
- name: Install firewall packages
  apt:
    name: [ 'haveged', 'iptables-persistent', 'netfilter-persistent' ]
    state: present
    update_cache: yes
# Flush every table/chain so the rules installed later are the only ones active
- name: Flush iptables
  iptables:
    flush: true
  become: yes
- name: Flush INPUT chain
  iptables:
    chain: INPUT
    flush: yes
  become: yes
- name: Flush OUTPUT chain
  iptables:
    chain: OUTPUT
    flush: yes
  become: yes
- name: Flush iptables nat table
  iptables:
    flush: yes
    table: nat
  become: yes
- name: Flush iptables mangle table
  iptables:
    flush: true
    table: mangle
  become: yes
- name: Flush iptables raw table
  iptables:
    flush: true
    table: raw
  become: yes
- name: Flush OUTPUT chain NAT table
  iptables:
    chain: OUTPUT
    table: nat
    flush: yes
  become: yes
- name: Flush FORWARD chain
  iptables:
    chain: FORWARD
    flush: yes
  become: yes
- name: Flush PREROUTING chain NAT Table
  iptables:
    chain: PREROUTING
    table: nat
    flush: yes
  become: yes
# ufw was removed in common setup; delete any chains it left behind
- name: Delete ufw chains
  command: "bash -c 'iptables -F {{ item }} && iptables -X {{ item }} || echo \"chain not found: {{ item }}\"'"
  with_items:
    - ufw-after-forward
    - ufw-after-input
    - ufw-after-logging-forward
    - ufw-after-logging-input
    - ufw-after-logging-output
    - ufw-after-output
    - ufw-before-forward
    - ufw-before-input
    - ufw-before-logging-forward
    - ufw-before-logging-input
    - ufw-before-logging-output
    - ufw-before-output
    - ufw-reject-forward
    - ufw-reject-input
    - ufw-reject-output
    - ufw-track-forward
    - ufw-track-input
    - ufw-track-output
- name: Install port manager
  copy:
    src: bubble_peer_manager.py
    dest: /usr/local/bin/bubble_peer_manager.py
    owner: root
    group: root
    mode: 0555
  when: fw_enable_admin
- name: Install supervisor conf file for port manager
  copy:
    src: supervisor_bubble_peer_manager.conf
    dest: /etc/supervisor/conf.d/bubble_peer_manager.conf
  when: fw_enable_admin
- include: sage.yml
  when: install_type == 'sage'
# Persist the rules so netfilter-persistent restores them on boot
- name: Creates /etc/iptables directory
  file:
    path: /etc/iptables
    state: directory
- name: save iptables v4 rules
  shell: iptables-save > /etc/iptables/rules.v4
  become: yes
- name: save iptables v6 rules
  shell: ip6tables-save > /etc/iptables/rules.v6
  become: yes
- supervisorctl:
    name: bubble_peer_manager
    state: restarted
  when: fw_enable_admin
@@ -0,0 +1,61 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Installs the baseline INPUT rules: loopback, established connections,
# SSH/HTTP/HTTPS (gated by fw_enable_* flags), then a final DROP.
# Rule order matters: the DROP task must remain last.
- name: Allow all from local
  iptables:
    chain: INPUT
    in_interface: lo
    jump: ACCEPT
    comment: Allow all from local
  become: yes
- name: Allow related and established connections
  iptables:
    chain: INPUT
    ctstate: ESTABLISHED,RELATED
    jump: ACCEPT
    comment: Allow related and established connections
  become: yes
- name: Allow SSH
  iptables:
    chain: INPUT
    protocol: tcp
    destination_port: 22
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new SSH connections
  become: yes
  when: fw_enable_ssh
- name: Allow HTTP
  iptables:
    chain: INPUT
    protocol: tcp
    destination_port: 80
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTP connections
  become: yes
  when: fw_enable_http
# HTTPS shares the fw_enable_http flag with HTTP
- name: Allow HTTPS
  iptables:
    chain: INPUT
    protocol: tcp
    destination_port: 443
    ctstate: NEW
    syn: match
    jump: ACCEPT
    comment: Accept new HTTPS connections
  become: yes
  when: fw_enable_http
- name: Drop everything else
  iptables:
    chain: INPUT
    jump: DROP
    comment: Drop anything else
  become: yes
@@ -0,0 +1,15 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Weekly cron job: stop the services listening on ports 80/443, renew the
# Let's Encrypt certificates with certbot's standalone authenticator, then
# bring the services back up.
MITM_HOME=/home/mitmproxy

# mitmproxy is only installed on some nodes; stop it when present
[[ -d ${MITM_HOME} ]] && service mitmproxy stop
service nginx stop

# Best-effort renewal: report failure, but always restart services below
if ! certbot renew --standalone --non-interactive ; then
  echo "Error updating SSL certificates"
fi

[[ -d ${MITM_HOME} ]] && service mitmproxy restart
service nginx restart
@@ -0,0 +1,17 @@ | |||||
#!/bin/bash
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Initialize Let's Encrypt certificates for this node.
#
# Usage: init_certbot.sh <email> <server-name> <server-alias>
#   email        - ACME account email to register with Let's Encrypt
#   server-name  - primary hostname to obtain a certificate for
#   server-alias - additional hostname to obtain a certificate for
#
LE_EMAIL="${1}"
SERVER_NAME="${2}"
SERVER_ALIAS="${3}"

if [[ -z "${LE_EMAIL}" || -z "${SERVER_NAME}" || -z "${SERVER_ALIAS}" ]] ; then
  echo "Usage: $0 <email> <server-name> <server-alias>"
  exit 1
fi

# Register an ACME account unless one already exists for this email.
# On a fresh node /etc/letsencrypt/accounts does not exist yet: suppress
# find's error and use xargs -r so grep never falls back to reading stdin.
if [[ $(find /etc/letsencrypt/accounts -type f -name regr.json 2> /dev/null | xargs -r grep -l "\"${LE_EMAIL}\"" | wc -l | tr -d ' ') -eq 0 ]] ; then
  certbot register --agree-tos -m "${LE_EMAIL}" --non-interactive
fi

# Issue only the certificates that are missing (avoids redundant issuance
# against Let's Encrypt rate limits); if both exist, just renew them.
if [[ -f "/etc/letsencrypt/live/${SERVER_NAME}/fullchain.pem" && -f "/etc/letsencrypt/live/${SERVER_ALIAS}/fullchain.pem" ]] ; then
  certbot renew --standalone --non-interactive
else
  for host in "${SERVER_NAME}" "${SERVER_ALIAS}" ; do
    if [[ ! -f "/etc/letsencrypt/live/${host}/fullchain.pem" ]] ; then
      certbot certonly --standalone --non-interactive -d "${host}"
    fi
  done
fi
@@ -0,0 +1,59 @@ | |||||
#
# Copyright (c) 2020 Bubble, Inc. All rights reserved. For personal (non-commercial) use, see license: https://getbubblenow.com/bubble-license/
#
# Install nginx and certbot, make Let's Encrypt cert files readable by nginx,
# and install the helper/renewal scripts.
- name: Install OpenSSL, nginx and software-properties-common
  apt:
    name: [ 'openssl', 'nginx', 'software-properties-common' ]
    state: present
    update_cache: yes
# NOTE(review): the Ubuntu release ("bionic") is hard-coded here — consider
# deriving it from ansible_distribution_release if other releases are targeted.
- name: Enable Ubuntu universe repositories
  apt_repository:
    repo: "{{ item }}"
    state: present
  loop:
    - "deb http://archive.ubuntu.com/ubuntu/ bionic universe"
    - "deb http://archive.ubuntu.com/ubuntu/ bionic-updates universe"
    - "deb http://security.ubuntu.com/ubuntu/ bionic-security universe"
- name: Enable ppa:certbot/certbot repository
  apt_repository:
    repo: ppa:certbot/certbot
    state: present
- name: Update packages after adding new repositories
  apt:
    update_cache: yes
- name: Install certbot
  apt:
    name: [ 'certbot' ]
    state: present
    update_cache: yes
# nginx runs as www-data and must be able to read the live cert/key files.
- name: Ensure nginx can read cert files
  file:
    dest: /etc/letsencrypt
    group: www-data
    recurse: yes
# certbot's standalone authenticator needs ports 80/443 free, so nginx is
# stopped before certificates are first requested.
- name: Ensure nginx is stopped
  service:
    name: nginx
    state: stopped
- name: Install init_certbot script
  copy:
    src: init_certbot.sh
    dest: /usr/local/bin/init_certbot.sh
    owner: root
    group: root
    mode: 0555
- name: Install certbot_renew.sh weekly cron job
  copy:
    src: "certbot_renew.sh"
    dest: /etc/cron.weekly/certbot_renew.sh
    owner: root
    group: root
    mode: 0755
@@ -3,15 +3,6 @@ | |||||
"_subst": true, | "_subst": true, | ||||
"name": "{{defaultDomain}}", | "name": "{{defaultDomain}}", | ||||
"publicDns": "{{TEST_DEFAULT_DNS_CLOUD}}", | "publicDns": "{{TEST_DEFAULT_DNS_CLOUD}}", | ||||
"template": true, | |||||
"roles": [ | |||||
"common-0.0.1", | |||||
"firewall-0.0.1", | |||||
"bubble-0.0.1", | |||||
"algo-0.0.1", | |||||
"mitmproxy-0.0.1", | |||||
"nginx-0.0.1", | |||||
"bubble_finalizer-0.0.1" | |||||
] | |||||
"template": true | |||||
} | } | ||||
] | ] |
@@ -1 +1 @@ | |||||
Subproject commit 45579ec8407102a82dceae37df8d23f1ab1b9686 | |||||
Subproject commit 5e49b6e3e4281c59abd127279e90299f633db53f |