%\documentclass[12pt]{article}
%\usepackage{geometry}
%\geometry{margin=1in}
%\usepackage{listings}
%\usepackage{setspace} % For setting line spacing
%
%\begin{document}
\newpage
\appendix % This command ensures correct section numbering
\section*{APPENDIX A: SUPPLEMENTARY MATERIAL} % Main appendix title - all caps
\label{appendix:supp_material} % Label for the appendix
\subsection*{A.1 Additional Data} % Sub-section within the appendix
\doublespacing % Apply double spacing here
The following code templates provide the services for the system components listed in Table~\ref{table:spec}. To modify the services and software, refer to the next section for the corresponding configuration. This appendix also includes:
\begin{itemize}
\item A more detailed breakdown of experimental results.
\item Additional statistical analyses.
\item Copies of survey instruments.
\end{itemize}
\singlespacing % Revert to single spacing if needed for other parts
\subsection*{A.2 Code Template}
\lstset{
language=bash, % listings has no built-in YAML definition; bash gives acceptable highlighting
caption=Kubernetes Deployment Template,
basicstyle=\ttfamily\footnotesize,
breaklines=true,
%numbers=left, % If you want line numbers
numberstyle=\tiny\color{gray}, % Style for line numbers
stepnumber=1,
numbersep=5pt,
frame=single, % Add a frame around the code
%backgroundcolor=\color{yellow!10}, % Light background color
}
\begin{lstlisting}
---
# Deployment for Mosquitto
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mosquitto-deployment
  labels:
    app: mosquitto
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mosquitto
  template:
    metadata:
      labels:
        app: mosquitto
    spec:
      containers:
        - name: mosquitto-mqtt
          image: eclipse-mosquitto
          ports:
            - containerPort: 1883
            - containerPort: 1990
          volumeMounts:
            - name: mosquitto-config
              mountPath: /mosquitto/config
            - name: mosquitto-data
              mountPath: /mosquitto/data
            - name: mosquitto-log
              mountPath: /mosquitto/log
      volumes:
        - name: mosquitto-config
          persistentVolumeClaim:
            claimName: mosquitto-config-pvc
        - name: mosquitto-data
          persistentVolumeClaim:
            claimName: mosquitto-data-pvc
        - name: mosquitto-log
          persistentVolumeClaim:
            claimName: mosquitto-log-pvc
---
# Service for Mosquitto
apiVersion: v1
kind: Service
metadata:
  name: mosquitto-service
  labels:
    app: mosquitto
spec:
  selector:
    app: mosquitto
  ports:
    - name: mqtt
      protocol: TCP
      port: 1883
      targetPort: 1883
    - name: admin
      protocol: TCP
      port: 1990
      targetPort: 1990
  type: ClusterIP # Or NodePort, if you need external access
---
# Persistent Volume Claims for Mosquitto
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mosquitto-config-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 100Mi # Adjust as needed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mosquitto-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi # Adjust as needed
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mosquitto-log-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi # Adjust as needed
---
# Deployment for Home Assistant
apiVersion: apps/v1
kind: Deployment
metadata:
  name: home-assistant-deployment
  labels:
    app: home-assistant
spec:
  replicas: 1
  selector:
    matchLabels:
      app: home-assistant
  template:
    metadata:
      labels:
        app: home-assistant
    spec:
      containers:
        - name: home-assistant
          image: "ghcr.io/home-assistant/home-assistant:stable"
          ports:
            - containerPort: 8123
          volumeMounts:
            - name: hass-config
              mountPath: /config
            - name: hass-localtime
              mountPath: /etc/localtime
          securityContext:
            privileged: true # Required for some Home Assistant features
      volumes:
        - name: hass-config
          persistentVolumeClaim:
            claimName: hass-config-pvc
        - name: hass-localtime
          hostPath:
            path: /etc/localtime # Mount from the host
---
# Service for Home Assistant
apiVersion: v1
kind: Service
metadata:
  name: home-assistant-service
  labels:
    app: home-assistant
spec:
  selector:
    app: home-assistant
  ports:
    - protocol: TCP
      port: 8123
      targetPort: 8123
  type: ClusterIP # Or NodePort
---
# Persistent Volume Claim for Home Assistant config
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hass-config-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi # Adjust as needed
---
# Deployment for Node-RED
apiVersion: apps/v1
kind: Deployment
metadata:
  name: node-red-deployment
  labels:
    app: node-red
spec:
  replicas: 1
  selector:
    matchLabels:
      app: node-red
  template:
    metadata:
      labels:
        app: node-red
    spec:
      containers:
        - name: node-red-container
          image: nodered/node-red:latest
          ports:
            - containerPort: 1880
          volumeMounts:
            - name: nodered-data
              mountPath: /data
          env:
            - name: TZ
              value: America/Los_Angeles
          securityContext:
            privileged: true
      volumes:
        - name: nodered-data
          persistentVolumeClaim:
            claimName: nodered-data-pvc
---
# Service for Node-RED
apiVersion: v1
kind: Service
metadata:
  name: node-red-service
  labels:
    app: node-red
spec:
  selector:
    app: node-red
  ports:
    - protocol: TCP
      port: 1880
      targetPort: 1880
  type: ClusterIP # Or NodePort
---
# Persistent Volume Claim for Node-RED data
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nodered-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi # Adjust as needed
---
# Deployment for Grafana
apiVersion: apps/v1
kind: Deployment
metadata:
  name: grafana-deployment
  labels:
    app: grafana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
        - name: grafana
          image: grafana/grafana:latest
          ports:
            - containerPort: 3000
          volumeMounts:
            - name: grafana-data
              mountPath: /var/lib/grafana
          env:
            - name: GF_SECURITY_ADMIN_USER
              value: admin
            - name: GF_SECURITY_ADMIN_PASSWORD
              value: password # Change this! Use a real password.
      volumes:
        - name: grafana-data
          persistentVolumeClaim:
            claimName: grafana-data-pvc
---
# Service for Grafana
apiVersion: v1
kind: Service
metadata:
  name: grafana-service
  labels:
    app: grafana
spec:
  selector:
    app: grafana
  ports:
    - protocol: TCP
      port: 3000
      targetPort: 3000
  type: ClusterIP # Or NodePort
---
# Persistent Volume Claim for Grafana data
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: grafana-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi # Adjust as needed
---
# Deployment for MinIO (Object Storage)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio-deployment
  labels:
    app: minio
spec:
  replicas: 1
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
        - name: minio
          image: minio/minio:latest
          ports:
            - containerPort: 9000
            - containerPort: 9001
          env:
            - name: MINIO_ROOT_USER
              value: minio
            - name: MINIO_ROOT_PASSWORD
              value: minio123 # Change this! Use a real password
          volumeMounts:
            - name: minio-data
              mountPath: "/data"
          # Use args (not command) so the image entrypoint is preserved
          args: ["server", "/data", "--console-address", ":9001"]
      volumes:
        - name: minio-data
          persistentVolumeClaim:
            claimName: minio-data-pvc
---
# Service for MinIO
apiVersion: v1
kind: Service
metadata:
  name: minio-service
  labels:
    app: minio
spec:
  selector:
    app: minio
  ports:
    - name: minio-api
      protocol: TCP
      port: 9000
      targetPort: 9000
    - name: minio-console
      protocol: TCP
      port: 9001
      targetPort: 9001
  type: ClusterIP # Or NodePort
---
# Persistent Volume Claim for MinIO data
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: minio-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi # Adjust as needed
---
# Deployment for ReadTheDocs
apiVersion: apps/v1
kind: Deployment
metadata:
  name: readthedocs-deployment
  labels:
    app: readthedocs
spec:
  replicas: 1
  selector:
    matchLabels:
      app: readthedocs
  template:
    metadata:
      labels:
        app: readthedocs
    spec:
      containers:
        - name: readthedocs
          image: readthedocs/readthedocs:latest
          ports:
            - containerPort: 8000
          volumeMounts:
            - name: readthedocs-data
              mountPath: /home/docs
          env:
            - name: DJANGO_SECRET_KEY
              value: "my-secret-key" # Change this! Use a real secret key
            - name: DATABASE_URL
              value: "postgres://user:password@postgres:5432/readthedocs" # Update
      volumes:
        - name: readthedocs-data
          persistentVolumeClaim:
            claimName: readthedocs-data-pvc
---
# Service for ReadTheDocs
apiVersion: v1
kind: Service
metadata:
  name: readthedocs-service
  labels:
    app: readthedocs
spec:
  selector:
    app: readthedocs
  ports:
    - protocol: TCP
      port: 8000
      targetPort: 8000
  type: ClusterIP # Or NodePort
---
# Persistent Volume Claim for ReadTheDocs data
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: readthedocs-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi # Adjust as needed
---
# Nginx Reverse Proxy Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-reverse-proxy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-reverse-proxy
  template:
    metadata:
      labels:
        app: nginx-reverse-proxy
    spec:
      containers:
        - name: nginx
          image: nginx:latest
          ports:
            - containerPort: 80
          volumeMounts:
            - name: nginx-config
              mountPath: /etc/nginx/conf.d
      volumes:
        - name: nginx-config
          configMap:
            name: nginx-configmap
---
# Nginx Reverse Proxy Service
apiVersion: v1
kind: Service
metadata:
  name: nginx-reverse-proxy-service
spec:
  selector:
    app: nginx-reverse-proxy
  ports:
    - port: 80
      targetPort: 80
  type: NodePort # Expose the service
---
# Nginx ConfigMap
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-configmap
data:
  default.conf: |
    server {
      listen 80;
      server_name grafana.local;
      location / {
        proxy_pass http://grafana-service:3000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
      }
    }
    server {
      listen 80;
      server_name nodered.local;
      location / {
        proxy_pass http://node-red-service:1880;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
      }
    }
    server {
      listen 80;
      server_name docs.local;
      location / {
        proxy_pass http://readthedocs-service:8000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
      }
    }
\end{lstlisting}
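Assuming the manifests above are saved to a single file (the name \texttt{smart-home-stack.yaml} below is only illustrative), they can be applied to the cluster with \texttt{kubectl} as sketched here. Because the reverse proxy routes on the \texttt{Host} header, the names \texttt{grafana.local}, \texttt{nodered.local}, and \texttt{docs.local} must resolve to a cluster node (for example via \texttt{/etc/hosts}) or be supplied explicitly when testing.
\begin{lstlisting}[language=bash, caption=Applying the manifests (illustrative)]
# Apply all manifests from one file (file name is illustrative)
kubectl apply -f smart-home-stack.yaml

# Verify that the deployments, services, and PVCs were created
kubectl get deployments,services,pvc

# Find the NodePort assigned to the Nginx reverse proxy
kubectl get service nginx-reverse-proxy-service -o wide

# Test one virtual host through the proxy (replace <node-ip> and <node-port>)
curl -H "Host: grafana.local" http://<node-ip>:<node-port>/
\end{lstlisting}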
\subsection*{A.3 ReadTheDocs Configuration}
ReadTheDocs uses a configuration file, typically named \texttt{.readthedocs.yaml}, to define how the documentation is built. This file is placed in the root of the Git repository. A basic example follows:
\begin{lstlisting}[language=bash, caption=ReadTheDocs Configuration Template]
# .readthedocs.yaml
version: 2
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"
  apt_packages:
    - libffi-dev
    - zlib1g-dev
    - libpq-dev
python:
  install:
    - requirements: docs/requirements.txt
sphinx:
  configuration: docs/conf.py
  builder: html
  fail_on_warning: false
formats:
  - pdf
  - epub
  - htmlzip
\end{lstlisting}
This configuration does the following:
\begin{itemize}
\item Specifies the Read the Docs configuration schema version.
\item Sets the build operating system.
\item Sets the Python version.
\item Installs OS-level dependencies via apt.
\item Installs Python dependencies from a requirements file (a sample \texttt{docs/requirements.txt} is sketched after this list).
\item Specifies the location of the Sphinx configuration file.
\item Selects the HTML builder and does not fail the build on warnings.
\item Configures the output formats (PDF, EPUB, and zipped HTML).
\end{itemize}
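The requirements file referenced in the list above supplies the Python packages needed for the documentation build. Its contents are project-specific; the sketch below assumes a plain Sphinx project using the Read the Docs theme, and the package names and version bounds are illustrative rather than prescriptive.
\begin{lstlisting}[language=bash, caption=Example docs/requirements.txt (illustrative)]
# docs/requirements.txt -- Python packages used by the documentation build
# (versions are illustrative; pin to whatever the project actually needs)
sphinx>=7.0
sphinx-rtd-theme>=2.0
myst-parser>=2.0
\end{lstlisting}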
%\end{document}