wid update - readme and new tools

This commit is contained in:
vadoli 2021-08-07 11:47:49 +00:00
parent a50f7ebe76
commit 4426fe8158
10 changed files with 202 additions and 195 deletions

View file

@ -21,12 +21,15 @@ port_increments = {
"IDE_URL": 5,
"TERMINAL_URL": 6,
"MC_URL": 7,
"HTOP_URL": 8,
"FREE": 9
"HTOP_URL": 8
}
workspace_meta = {
"base-workspace": {
"port-range": 10,
"entrypoints": ["DOCS_URL", "FILEBROWSER_URL", "STATICFS_URL", "CRONICLE_URL", "UNGIT_URL", "TERMINAL_URL", "MC_URL", "HTOP_URL"]
},
"workspace-in-docker": {
"port-range": 10,
"entrypoints": ["DOCS_URL", "FILEBROWSER_URL", "STATICFS_URL", "CRONICLE_URL", "UNGIT_URL", "IDE_URL", "TERMINAL_URL", "MC_URL", "HTOP_URL"]
}
@ -106,10 +109,6 @@ def get_compose_dict(workspace_name, host_ip, start_port, user, password):
ep = {entrypoint:port+start_port for entrypoint,port in port_increments.items() if entrypoint in workspace_entrypoints}
traefik_command = [f"--entrypoints.{entrypoint}.address=:{port}" for entrypoint,port in ep.items()]
traefik_command += [
"--api",
"--api.dashboard",
"--api.insecure",
"--providers.docker",
"--providers.file.directory=/etc/traefik/dynamic_conf"
]
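
For example, with `start_port=8020` and the `base-workspace` entrypoint list above, the generated traefik flags would look roughly like the following (a sketch; it assumes `DOCS_URL` maps to increment 0, which is outside this hunk):
```
--entrypoints.DOCS_URL.address=:8020
--entrypoints.FILEBROWSER_URL.address=:8021
--entrypoints.STATICFS_URL.address=:8022
--entrypoints.CRONICLE_URL.address=:8023
--entrypoints.UNGIT_URL.address=:8024
--entrypoints.TERMINAL_URL.address=:8026
--entrypoints.MC_URL.address=:8027
--entrypoints.HTOP_URL.address=:8028
```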

View file

@ -20,7 +20,7 @@ COPY supervisord-workspace-base.conf /etc/supervisord/
COPY filebrowser.json /opt/filebrowser/.filebrowser.json
COPY mkdocs /home/docs
COPY README.md /home/docs/docs/get-started.md
COPY README.md /home/docs/docs/docs.md
COPY mkdocs-requirements.txt /home/abc/installed-python-packages/mkdocs-requirements.txt
RUN echo "------------------------------------------------------ filebrowser, ungit, static server" \

View file

@ -7,7 +7,9 @@ entirely through WEB-based interfaces - its own WEB-UI, WEB-based terminal, file
#### Try it out
``` docker run --name space-1 --user=root -d -p 8020-8030:8020-8030 alnoda/base-workspace```
```
docker run --name space-1 --user=root -d -p 8020-8030:8020-8030 alnoda/base-workspace
```
## Contents
@ -39,7 +41,7 @@ entirely through WEB-based interfaces - its own WEB-UI, WEB-based terminal, file
> TL;DR
> You can provide your users with many virtual environments, manage just one server, and have less work with server configuration management.
Because Ddocker is not completely suitable to serve this purpose, Base-Workspace tries to cover some of the shortcomings: it has cron set up, supervisord, allows to start multiple processes
Because Docker is not completely suitable to serve this purpose, Base-Workspace tries to cover some of the shortcomings: it has cron set up, supervisord, allows to start multiple processes
inside the same container, has docker-in-docker, and some other applications installed, such as Git, Gitflow, wget, nano, vim etc.
In addition, Base-Workspace has some applications with a WEB-UI, which make it easier to schedule and monitor job executions, browse and

View file

@ -1,75 +0,0 @@
version: "3.3"
services:
traefik:
image: "traefik:v2.4"
container_name: "traefik"
command:
- "--api"
- "--api.dashboard"
- "--api.insecure"
- "--providers.docker"
- "--entrypoints.docs.address=:8020"
- "--entrypoints.filebrowser.address=:8021"
- "--entrypoints.staticfs.address=:8022"
- "--entrypoints.cronicle.address=:8023"
- "--entrypoints.ungit.address=:8024"
- "--entrypoints.terminal.address=:8026"
ports:
- 8080:8080
- "8020-8030:8020-8030"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock:ro"
workspace:
image: alnoda/workspace-in-docker
environment:
- WRK_HOST="68.183.218.233"
labels:
# To create user:password pair, it's possible to use this command:
# In any workspace-in-docker execute in terminal: echo $(htpasswd -nB <userName>) | sed -e s/\\$/\\$\\$/g
# (csubstitute <userName> with the user name of your choice)
# and enter password (twice)
# by default the user:pass is admin:admin
# You can have multiple <user:pass> separated with ","
- "traefik.http.middlewares.basic-auth.basicauth.users=admin:$$2y$$05$$eub6CV.CwUYCCQjNBvSf5uZnzdRmVwGZ/ncxecb9O7WxCR8aLuM3K"
- "traefik.enable=true"
# docs
- "traefik.http.services.docs.loadbalancer.server.port=8020"
- "traefik.http.routers.docs.service=docs"
- "traefik.http.routers.docs.rule=PathPrefix(`/`)"
- "traefik.http.routers.docs.entrypoints=docs"
- "traefik.http.routers.docs.middlewares=basic-auth"
# filebrowser
- "traefik.http.services.filebrowser.loadbalancer.server.port=8021"
- "traefik.http.routers.filebrowser.service=filebrowser"
- "traefik.http.routers.filebrowser.rule=PathPrefix(`/`)"
- "traefik.http.routers.filebrowser.entrypoints=filebrowser"
- "traefik.http.routers.filebrowser.middlewares=basic-auth"
# static file server
- "traefik.http.services.staticfs.loadbalancer.server.port=8022"
- "traefik.http.routers.staticfs.service=staticfs"
- "traefik.http.routers.staticfs.rule=PathPrefix(`/`)"
- "traefik.http.routers.staticfs.entrypoints=staticfs"
- "traefik.http.routers.staticfs.middlewares=basic-auth"
# cronicle
- "traefik.http.services.cronicle.loadbalancer.server.port=8023"
- "traefik.http.routers.cronicle.service=cronicle"
- "traefik.http.routers.cronicle.rule=PathPrefix(`/`)"
- "traefik.http.routers.cronicle.entrypoints=cronicle"
- "traefik.http.routers.cronicle.middlewares=basic-auth"
# ungit
- "traefik.http.services.ungit.loadbalancer.server.port=8024"
- "traefik.http.routers.ungit.service=ungit"
- "traefik.http.routers.ungit.rule=PathPrefix(`/`)"
- "traefik.http.routers.ungit.entrypoints=ungit"
- "traefik.http.routers.ungit.middlewares=basic-auth"
# ide
- "traefik.http.services.terminal.loadbalancer.server.port=8026"
- "traefik.http.routers.terminal.service=ide"
- "traefik.http.routers.terminal.rule=PathPrefix(`/`)"
- "traefik.http.routers.terminal.entrypoints=ide"
- "traefik.http.routers.terminal.middlewares=basic-auth"

View file

@ -5,7 +5,7 @@
nav:
- Home: pages/home/home.md
- About: README.md
- Get started: get-started.md
- Docs: docs.md
# ===========================================================

View file

@ -52,7 +52,7 @@ COPY settings.json /home/abc/.theia/settings.json
COPY supervisord-theia.conf /etc/supervisord/
COPY mkdocs /home/docs
COPY README.md /home/docs/docs/get-started.md
COPY README.md /home/docs/docs/docs.md
ENV SHELL=/bin/bash \
THEIA_DEFAULT_PLUGINS=local-dir:/opt/theia/plugins \
@ -61,10 +61,18 @@ ENV SHELL=/bin/bash \
PATH="/home/abc/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" \
NVM_DIR=/home/abc/.nvm
RUN chown -R abc /opt/theia \
RUN echo "------------------------------------------------------ utils" \
&& rm -rf /home/abc/utils || true \
&& git clone https://github.com/bluxmit/alnoda-workspaces /tmp/alnoda-workspaces \
&& mv /tmp/alnoda-workspaces/utils /home/abc/ \
&& rm -rf /tmp/alnoda-workspaces \
&& echo "------------------------------------------------------ user" \
&& chown -R abc /opt/theia \
&& mkdir -p /var/log/theia && chown -R abc /var/log/theia \
&& chown -R abc /home/docs \
&& chown -R abc /home/abc/utils \
&& chown -R abc /home/abc/installed-python-packages \
&& find /home -type d | xargs -I{} chown -R abc {} \
&& find /home -type f | xargs -I{} chown abc {}
USER abc

View file

@ -1,47 +1,77 @@
# Workspace-in-docker
Workspace-in-docker has all the features of the *Base-Workspace*, and in addition it includes
a powerful browser-based version of Visual Studio Code. It is a lightweight VM alternative, which provides full isolation,
consumes little resources and contains full-fledged IDE.
Workspace-in-docker is a completely self-contained and fully isolated development environment that runs inside a docker container.
This image includes everything necessary to start coding right away. In order to use Workspace-in-docker, you don't need to
install or use any other tools, apart from docker itself. The workspace can be used on a laptop or PC, or launched on a remote server with https
and authentication. It can be shared, backed up, and it has versions.
![Workspaces are amazing!](./img/workspace-demo.gif)
This workspace allows complete isolation of many IT-related projects, such as software development, devops, QA, data analysis, data engineering, data science and other.
Workspace can be used as local development environment, as well as remote, when started on the cloud server, and can be secured with password.
Workspace-in-docker allows to completely isolate and switch easily between projects that involve multiple programming languages, cloud CLIs,
k8s clusters, configurations and have system dependencies. Withou this workspace swithing between such complex environments would require multiple actions and can be tedious work.
Workspace-in-docker is great for collaborative work - itcan be easily shared, moved to any cloud server, backed up, has versions and can be easily extended and customized.
Can be used as build, test or even runtime environment.
Workspace-in-docker is a good choice if you want control and versatility. It is lightweight,
includes open-source tools and nearly as convenient as working directly with a local environment.
#### Try it out
```
docker run --name space-1 -d -p 8020-8035:8020-8035 alnoda/workspace-in-docker
```
## Contents
* [What's included](#whats-included)
* [Why this image](#why-this-image)
* [Use-cases](#use-cases)
* [Features](#features)
* [Launch Workspace](#launch-workspace)
* [Workspace terminal](#workspace-terminal)
* [Multiple workspaces](#multipl-workspaces)
* [Multiple workspaces](#multiple-workspaces)
* [Open more ports](#open-more-ports)
* [Run as root](#run-as-root)
* [Docker in docker](#docker-in-docker)
* [Run on remote server](#run-on-remote-server)
* [Use Workspace](#use-workspace)
* [Install new packages](#install-new-packages)
* [Install applications](#install-applications)
* [Schedule jobs with Cron](#schedule-jobs-with-cron)
* [Python](#python)
* [Node.js](#nodejs)
* [Run applications and services inside the workspace](#run-applications-and-service-inside-the-workspace)
* [Run applications and services inside the workspace](#run-applications-and-services-inside-the-workspace)
* [Manage workspaces](#manage-workspaces)
* [Start and stop workspaces](#start-and-stop-workspaces)
* [Create new workspace image](#create-new-workspace-image)
* [Manage workspace images](#manage-workspace-images)
* [Save and load workspace images](#save-and-load-workspace-images)
* [Move workspace to the cloud](#move-workspace-to-the-cloud)
## What's included
## Why this image
This workspace allows complete isolation of many IT-related projects, such as software development, devops, QA, data analysis, data engineering, data science and others.
The workspace can be used as a local development environment, as well as a remote dev environment when started on a cloud server, and can be secured with a password.
Workspace-in-docker makes it possible to completely isolate and easily switch between projects that involve multiple programming languages, cloud CLIs,
k8s clusters, configurations and system dependencies. Without this workspace, switching between such complex environments would require multiple actions and can be tedious work.
Workspace-in-docker is great for collaborative work - it can be easily shared, moved to any cloud server, backed up, has versions and can be easily extended and customized.
It can be used as a build, test or even runtime environment.
The workspace makes it extremely easy to launch small products created by hobby developers. Because the development, build and runtime environment is the very same
workspace, deploying a web application becomes as easy as committing the workspace to a docker image and running it on any server. There is no need to write a Dockerfile, or waste time trying to replicate
your development environment in a production docker image.
Workspace-in-docker is a good choice if you want control and versatility. It is lightweight,
includes open-source tools and is nearly as convenient as working directly with a local environment.
## Use-cases
- Avoid the tedious process of setting up a dev environment on your laptop
- Move dev environments between powerful PC and laptop in a minute
- Work conveniently with multiple IT projects on your laptop
- Run dev environment in the cloud and work from any device, independent of any cloud service or cloud provider
- Back-up and version control entire dev environments
- Collaborate with peers by sharing entire workspace or run it in cloud
- Move from dev to production in a minute
- Create a custom dev environment for your team, and help newcomers to save time on setting up their environments
- Simple way to start coding for newbies
## Features
Workspace-in-docker has all the features of the [*Base-Workspace*](https://github.com/bluxmit/alnoda-workspaces/blob/main/workspaces/base-workspace/README.md),
and in addition it includes a powerful browser-based version of Visual Studio Code. It is a lightweight VM alternative, which provides full isolation,
consumes few resources and contains a full-fledged IDE.
![Workspaces are amazing!](./img/workspace-demo.gif)
- **Workspace UI** - launch all workspace tools from one place.
- [**Eclipse Theia**](https://theia-ide.org/docs/) - an open-source version of the popular Visual Studio Code IDE. Theia is truly open-source, has
@ -51,17 +81,8 @@ VS-Code extensions and works in browser. This means it can run inside a docker c
- [**Static File Server**](https://github.com/vercel/serve) - view any static html site as easily as on your local machine. Serve static websites easily.
- [**Ungit**](https://github.com/FredrikNoren/ungit) - brings user friendliness to git without sacrificing its versatility.
- [**MkDocs**](https://squidfunk.github.io/mkdocs-material/) - maintain documentation for your workspace or project with only markdown.
Built on top of Base-workspace and Ubuntu-workspace, this workspace has all the features those workspaces have.
In particular, workspace-in-docker provides excellent experience when working directly in the terminal, and has docker-in-docker.
## Use-cases
Based on Ubuntu, this workspace works best for users who know what `apt-install` is. This workspace has open-source WEB-based
VS-code variant, and a set of tools that make it easy to manage data inside docker, exchange files with local environnment, work with Git
and create beautiful documentations for your projects.
Workspace-in-docker has Python and Node.js, but if you need other runtimes such as Java, Ruby or Go, you will have to install it yourself.
- [**Midnight Commander**](https://midnight-commander.org/) - Feature rich visual file manager with internal text viewer and editor.
- [**Process Monitor**](https://htop.dev/) - Monitor running process and resource utilization.
## Launch Workspace
@ -79,21 +100,30 @@ command to execute outside of the workspace
To start a workspace simply execute in terminal
```sh
docker run --name space-1 -d -p 8020-8030:8020-8030 alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 alnoda/workspace-in-docker
```
*(It is recommended to run workspace in the daemon mode)*
***Open [http://localhost:8020](http://localhost:8020)***
Workspace has web-based documentation with home page, from where you can open any workspace tool.
It is recommended to run the workspace in daemon mode.
The workspace has its own UI, which includes a quick-launch (home) page and documentation pages.
From the quick-launch page you can open any workspace tool. You can modify the documentation pages
to document the project, the workspace usage and setup.
### Workspace terminal
There are 2 ways how to work with terminal inside the workspace-in-docker:
There are several ways to work with the terminal of the workspace-in-docker:
built-in in-browser terminal
use the terminal provided by the in-browser IDE [http://localhost:8025](http://localhost:8025) ([unless other ports are mapped](#multiple-workspaces))
- enter running workspace docker container from your terminal
ssh into the running docker container (of the workspace) from your terminal
<p align="center">
<img src="https://raw.githubusercontent.com/bluxmit/alnoda-workspaces/main/workspaces/base-workspace/img/base-workspace-terminal.gif" alt="Base-Workspace terminal" width="500">
</p>
*(Browser-based terminals always work under the user you started the workspace with; the default is the non-root user "abc")*
If you want to enter the running workspace container from your terminal, execute:
```sh
@ -105,96 +135,119 @@ If you don't want to use z-shell
docker exec -it space-1 /bin/bash
```
This way you can ssh into the workspace as the root user at any time, even if the workspace itself was not started as the root user (the default user is abc)
```sh
docker exec -it --user=root space-1 /bin/zsh
```
You can work in the Ubuntu terminal now. Execute the following command to see your workspace user
> `whoami`
### Multiple workspaces
Every workspace requires range of ports. If one workspace is up and running, the ports 8020-8030 are taken.
Every workspace requires range of ports. If one workspace is up and running, the ports 8020-8035 are taken.
In order to start another workspace it is necessary either to stop currently runnning workspace, or to run another workspace
Workspace-in-docker itself uses 9 ports (8020-8028), but it is recommended to map several extra ports just in case. Having extra ports,
you can always launch new applications on these ports, and they will be immediately exposed outside of the workspace.
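
For example, a minimal sketch: from the workspace terminal you could serve the current folder on one of the spare ports, and it becomes reachable from the host right away (assuming the port is inside the mapped range):
```sh
# inside the workspace terminal; 8029 is one of the mapped but unused ports
python3 -m http.server 8029
# then open http://localhost:8029 on the host
```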
In order to start another workspace, you either need to stop the currently running workspace, or run another workspace
on a different port range.
If you are planning to run multiple workspaces at the same time, you can run second workspace with different port range
If you are planning to run more than one workspace at the same time, you can run another workspace with
a different port range, for example
```sh
docker run --name space-2 -d -p 8040-8050:8020-8030 -e ENTRY_PORT=8040 alnoda/workspace-in-docker
```
Notice that in addition we need to set environmental variable ENTRY_PORT, which should be equal to the first port in the new range.
This is needed for the documentation main page to set up correct links to other tools (Filebrowser, Cronicle etc.)
The Workspace UI uses this variable to learn the new port range, and redirects to the proper addresses of the workspace applications' UIs.
### Open more ports
We started workspace container with a port range mapped "-p 8020-8030". If you are planning to expose more applications
We started workspace container with a port range mapped "-p 8020-8035". If you are planning to expose more applications
from inside of a container, add additional port mapping, for example
```sh
docker run --name space-1 -d -p 8020-8030:8020-8030 -p 8080:8080 alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 -p 8080:8080 alnoda/workspace-in-docker
```
You can add multiple port mappings:
```sh
docker run --name space-1 -d -p 8020-8030:8020-8030 -p 8080:8080 -p 443:443 alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 -p 8080:8080 -p 443:443 alnoda/workspace-in-docker
```
**NOTE:** It is not a problem if you don't expose any ports, but later on realise you need them -
you will just create new image, and run it exposing the required port (look in the section [Create new image](#create-new-workspace-image))
### Run as root
The default user is **abc** with passwordless sudo to install packages. If you'd rather work as root, then you should ssh into running container as
```sh
docker exec -it --user=root space-1 /bin/zsh
```
You can of course open several terminals to the same running containner as both abc and root users at the same time.
you will just create a new image, and run it exposing the required port (see the section [Create new image](#create-new-workspace-image))
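
For example, a minimal sketch of that flow (the image name `my-space-image:0.1` and port `9000` are illustrative):
```sh
# commit the running workspace to a new image (name is illustrative)
docker commit space-1 my-space-image:0.1
# remove the old container so the name can be reused
docker rm -f space-1
# start the new image with the extra port exposed
docker run --name space-1 -d -p 8020-8035:8020-8035 -p 9000:9000 my-space-image:0.1
```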
### Docker in docker
It is possible to work with docker directly from the workspace. In order to be able to use docker directly inside the workspace,
start the workspace with mounting `/var/run/docker.sock` and using root user:
It is possible to work with docker directly from the workspace (using the workspace terminal).
```
docker run --name space-1 -d -p 8020-8030:8020-8030 -v /var/run/docker.sock:/var/run/docker.sock --user=root alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 -v /var/run/docker.sock:/var/run/docker.sock alnoda/workspace-in-docker
```
Alternatively you can run workspace as non-root
NOTE: in order to use docker-in-docker you need to enter the workspace container as root
```sh
docker run --name space-1 -d -p 8020-8030:8020-8030 -v /var/run/docker.sock:/var/run/docker.sock alnoda/workspace-in-docker
```
but whenever you want to use docker enter into the workspace container as root
```
docker exec -it --user=root space-1 /bin/zsh
```
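
A quick sketch to check that docker-in-docker works:
```sh
# on the host: enter the workspace as root
docker exec -it --user=root space-1 /bin/zsh
# inside the workspace: the docker CLI talks to the host daemon through the mounted socket
docker ps
```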
### Run on remote server
Because workspace is just a docker image, running it in cloud is as easy as running it on local laptop. There are only 3 steps involved:
Because the workspace is just a docker image, running it on any other server is as easy as running it on a local laptop.
Running on a remote server makes it much simpler to collaborate: you can simply share the workspace credentials with your peers, and they will be able to use it.
You can also run applications that should run permanently, and run jobs on schedule.
#### Unsecure remote workspace
The simplest deployment of the workspace requires only 3 steps:
- get virtual server on your favourite cloud (Digital Ocean, Linode, AWS, GC, Azure ...)
- [install docker](https://docs.docker.com/engine/install/) on this server
- ssh to the remote server and start workspace with envronmental variable `-e WRK_HOST="<ip-of-your-remote-server>"`
- ssh to the remote server and start workspace
```
docker run --name space-1 -d -p 8020-8030:8020-8030 -e WRK_HOST="<ip-of-your-remote-server>" alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 -e WRK_HOST="<ip-of-your-remote-server>" alnoda/workspace-in-docker
```
if docker-in-docker needed then
**NOTE:** When running the workspace on a remote server, add the environmental variable `-e WRK_HOST="<ip-of-your-remote-server>"`.
The Workspace UI needs this variable to know how to redirect properly to the workspace applications' UIs.
Open in your browser `<ip-of-your-remote-server>:8020`
If docker-in-docker is required, then
```
docker run --name space-1 -d -p 8020-8030:8020-8030 -e WRK_HOST="<ip-of-your-remote-server>" -v /var/run/docker.sock:/var/run/docker.sock alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 -e WRK_HOST="<ip-of-your-remote-server>" -v /var/run/docker.sock:/var/run/docker.sock alnoda/workspace-in-docker
```
Open in your browser `<ip-of-your-remote-server>:8020`
This launches the workspace in the cloud, but such a workspace is not secure: anyone who knows the IP of your server will be able to use it.
When running workspace-in-docker on the remote server, it is useful to add authentication mechanism, otherwise anyone in the world
who gets to know the IP of the remote server will be able to use your workspace. We have created a docker-compose file, that will
let you launching workspace with authentication - [read the instructions here](https://github.com/Alnoda/workspaces-in-docker/blob/main/workspaces/workspace-in-docker/md/auth-for-remote-workspace.md)
#### Secure remote workspace
> [Check out the complete docs](https://alnoda.org) to know more.
*You might want to restrict access to the workspace, and secure encrypted communication with the workspace*
Workspace-in-docker contains a utility that will generate everything needed to launch the workspace in the cloud in a secure way, with authentication and TLS.
If you want to run the workspace on a remote server securely, start workspace-in-docker on your local laptop first, open its terminal and
use the utility `/home/abc/utils/remote.py` to generate a docker-compose project with TLS certificates. Simply execute
> `python /home/abc/utils/remote.py --workspace="workspace-in-docker" --port="8020" --host="68.183.69.198" --user="user1" --password="pass1"`
**NOTE:** you have to specify the correct host (IP of the server you want to run the workspace on), and user and password of your choice.
You will see that the folder `/home/abc/utils/remote` has been created. Copy this folder to the remote server (any location). SSH to the server, cd into
the copied directory and execute `docker-compose up -d`.
That's it, your workspace is running securely on the remote server, using
self-signed TLS certificates for encrypted https communication between your laptop and the remote workspace,
with authentication added.
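
A minimal sketch of those last steps (the user name, paths and server address are illustrative):
```sh
# on the laptop: copy the generated folder out of the workspace container, then to the server
docker cp space-1:/home/abc/utils/remote ./remote
scp -r ./remote user@<ip-of-your-remote-server>:~/remote
# on the remote server: start the secured workspace
ssh user@<ip-of-your-remote-server> "cd ~/remote && docker-compose up -d"
```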
## Use Workspace
The common actions inside the workspace include
Among the common actions you'd do in the workspace are
- installation of new applications and runtimes
- edit files, write code, scripts
@ -203,11 +256,12 @@ The common actions inside the workspace include
- schedule tasks and scripts
- process data
### Install new packages
Install new packages with ```sudo apt install```. The default abc user is allowed to install packages.
### Install applications
For example, in order to install [Emacs text editor](https://www.gnu.org/software/emacs/) make sure you
have entered running docker container (of the workspace), and execute in terminal
Use the workspace terminal to install new applications.
Install with ```sudo apt install```. The default *abc* user is allowed to install packages.
For example, in order to install the [Emacs text editor](https://www.gnu.org/software/emacs/) open the workspace terminal and execute
> `sudo apt install emacs`
@ -216,7 +270,7 @@ have entered running docker container (of the workspace), and execute in termina
Schedule execution of any task with cron - a time-based job scheduler in Unix-like computer operating systems.
In order to create scheduled job enter running docker container, and execute in terminal
Open the workspace terminal, and execute
> `crontab -e`
@ -225,7 +279,6 @@ In the end of the opened file add line
> `* * * * * echo $(whoami) >> /home/cron.txt`
This will print the username to the file */home/cron.txt* every minute. *(Hit Ctrl+X to exit nano)*
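
A quick way to check that the job runs (a sketch):
```sh
# wait a minute or two, then look at the file the cron job writes to
cat /home/cron.txt
```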
Hint: example of cron job definition:
@ -241,11 +294,12 @@ Hint: example of cron job definition:
**NOTE** you can disconnect from the workspace and close the terminal - cron will continue working.
> In addition to the commonly known ***cron scheduler*** you can use Cronicle - the tool with Web UI and great features
> which is bundeled together with the workspace-in-docker.
> Instead of cron you might want to use Cronicle - a tool with a Web UI and a great list of features
> that will provide you with a dashboard, a list of executions and statistics, even let you set limits
> on resources for each job, and create dependencies between jobs.
### Python
Python, Pip and Venv are installed. To start python console, enter running docker container, and execute in terminal
Python and Pip are installed. To use the python console, open the workspace terminal and execute
> `python`
@ -259,16 +313,10 @@ you make the most of using Python interactively. Install and start ipython
> ```pip install ipython```
> `ipython`
Example of using [venv](https://docs.python.org/3/tutorial/venv.html)
> `cd ~p; mkdir /home/project/venv-test; cd venv-test`
> `python3 -m venv example-env`
> `source example-env/bin/activate`
### Node.js
We recommend using nodeenv to create separate node environments.
For example, create folder npmgui, and activate environment with node v. 12.18.3 and npm v.6.0.0 (make sure you are inside workspace docker container)
For example, open the workspace terminal, create the folder npmgui, and activate an environment with node v12.18.3 and npm v6.0.0
> `cd /home`
> `mkdir npmgui; cd npmgui`
@ -281,22 +329,19 @@ Let's install package and start node application
Open your browser on http://localhost:8030/
**NOTE:** If you close terminal, the application will stop. See how to [start applications that keep running after closing a workspace terminal](#run-applications-and-services-inside-the-workspace)
**NOTE:** If you close the terminal, the application will stop. See how to [start applications that remain live after closing a workspace terminal](#run-applications-and-services-inside-the-workspace)
### Run applications and services inside the workspace
If you want application to keep running after terminal is closed start it with **"&!"** at the end.
If you want an application to keep running after the workspace terminal is closed, start it with **"&!"** at the end.
For example, enter into the running workspace container, and start the example node application from the previous section:
For example, in the last section we started the *npm-gui* tool with the command `npm-gui 0.0.0.0:8030`. If you close the workspace terminal,
this application will stop running. To keep it running after the terminal is closed, execute
> `cd /home/npmgui`
> `. env/bin/activate && npm i -g npm-gui &!`
Now, if you disconnect from the workspace and close terminal, the application will still continue running in the workspace, untill [workspace is stopped](#start-and-stop-workspaces).
If you want application to start automatically each time workspaces is restarted, or the new workspace is created, see [running applications permanently](extend.md#add-applications-and-services)
> `npm-gui 0.0.0.0:8030 &!`
Now, if you disconnect from the workspace and close the terminal, the application will continue running in the workspace, until the [workspace is stopped](#start-and-stop-workspaces).
## Manage workspaces
@ -306,7 +351,7 @@ There are two concepts to keep in mind: **images** and **containers**. Images ar
is an image. When you execute this command
```sh
docker run --name space-1 -d -p 8020-8030:8020-8030 alnoda/workspace-in-docker
docker run --name space-1 -d -p 8020-8035:8020-8035 alnoda/workspace-in-docker
```
you create container called **space-1** from the image **alnoda/workspace-in-docker**. You can create any number of containers, but you need to
[map different ports to each of them](#multiple-workspaces).
@ -321,7 +366,7 @@ Essentially, this means *"take my workspace and create new image with all the ch
The workspace started in daemon mode will continue working in the background.
See all the running docker containers (including workspaces)
See all the running docker containers
```
docker ps
@ -388,9 +433,9 @@ docker rmi -f alnoda/workspace-in-docker
### Save and load workspace images
After you commit workspace container, and create new image out of it, you can push it to your docker registry or save it as a file.
After you commit the workspace container and create a new image out of it, you can push it to your docker registry or save it in a file.
#### Save workspace as file
#### Save workspace in a file
Assuming you created new image **space-image:0.4** from your workspace, you can save it as a tar file
@ -410,11 +455,39 @@ And restore it from the tar file
docker load < space-image-0.4.tar
```
#### Push workspace to private docker registry
#### Push workspace to a registry
A better way to manage images is a docker registry. You can use docker registries in multiple clouds. They are cheap and very convenient.
Check out, for example, the [Registry in DigitalOcean](https://www.digitalocean.com/products/container-registry/) or the [Scaleway container registry](https://www.scaleway.com/en/container-registry/). There are more.
Pushing an image to a registry takes merely 2 extra commands: 1) tag the image; 2) push the image
You will be able to pull image on any device, local or cloud.
You will be able to pull image on any device, local or cloud.
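
A minimal sketch of those 2 commands (the registry path and namespace are illustrative):
```sh
# tag the image you created from the workspace for your registry
docker tag space-image:0.4 registry.example.com/your-namespace/space-image:0.4
# push it to the registry
docker push registry.example.com/your-namespace/space-image:0.4
```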
### Move workspace to the cloud
The ease of running the workspace in the cloud, and the ability to move workspaces between a local machine and a remote server,
is one of the main features of the workspace, and the reason why the workspace lives entirely in docker.
It is often the case that an experiment which started on a personal notebook requires more computational
resources, must run for a long period of time, or must be executed periodically. All of these are
reasons to move a workspace to a cloud server. Usually this is a hassle, but this workspace can be moved
to a remote server easily.
The easiest way to move the workspace to the cloud is to get your own private docker registry. Then moving a workspace from a laptop to
a remote server takes only 3 steps (a sketch follows the list):
1. [Commit workspace to an image](#create-new-workspace-image)
2. [Push workspace to your docker registry](https://docs.docker.com/engine/reference/commandline/push/)
3. ssh to remote server, and [run workspace there](#run-on-remote-server)
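
A minimal sketch of this flow (the registry path and image name are illustrative):
```sh
# 1. on the laptop: commit the running workspace to an image
docker commit space-1 registry.example.com/your-namespace/space-image:0.4
# 2. push it to your registry
docker push registry.example.com/your-namespace/space-image:0.4
# 3. on the remote server: pull and run it
docker run --name space-1 -d -p 8020-8035:8020-8035 registry.example.com/your-namespace/space-image:0.4
```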
If you don't want to use a container registry, then there are 2 more steps involved (see the sketch after the list):
1. [Commit workspace to an image](#create-new-workspace-image)
2. [Save image to file](#save-and-load-workspace-images)
3. Copy the file to the remote server. There are several options:
- Launch filexchange workspace on the remote server
- Use [cyberduck](https://cyberduck.io/)
- use [scp](https://linuxize.com/post/how-to-use-scp-command-to-securely-transfer-files/)
4. [Load workspace image from file](#save-and-load-workspace-images) on the remote server
5. [Start workspace on the remote server](#run-on-remote-server)
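
A sketch of this registry-free flow (file names and the server address are illustrative):
```sh
# 1-2. on the laptop: commit the workspace and save the image to a tar file
docker commit space-1 space-image:0.4
docker save space-image:0.4 > space-image-0.4.tar
# 3. copy the file to the remote server (scp shown here)
scp space-image-0.4.tar user@<ip-of-your-remote-server>:~/
# 4-5. on the remote server: load the image and start the workspace
docker load < space-image-0.4.tar
docker run --name space-1 -d -p 8020-8035:8020-8035 space-image:0.4
```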

View file

@ -10,8 +10,8 @@ port_increments = {
"STATICFS_URL": 2,
"CRONICLE_URL": 3,
"UNGIT_URL": 4,
"TERMINAL_URL": 6,
"IDE_URL": 5,
"TERMINAL_URL": 6,
"MC_URL": 7,
"HTOP_URL": 8
}

View file

@ -5,7 +5,7 @@
nav:
- Home: pages/home/home.md
- About: README.md
- Get started: get-started.md
- Docs: docs.md
# ===========================================================