Merge pull request #6 from CoreViewInc/wait-for-pod-running-state-before-logs

feat: improve CoreNiko getting started guide and error handling
DeanHnter authored Apr 26, 2024
2 parents e921541 + d4517d2 commit d28cd0d
Showing 4 changed files with 46 additions and 11 deletions.
2 changes: 1 addition & 1 deletion Client/cmd/docker.go
```diff
@@ -50,7 +50,7 @@ var (
 			err := DockerCLI.Service.BuildImage(buildOptions, contextPath, dockerfilePath)
 			if err!=nil{
 				fmt.Println(err)
-				os.Exit(1)
+				os.Exit(2)
 			}
 		},
 	}
```
5 changes: 2 additions & 3 deletions Client/kaniko/cmd.go
```diff
@@ -145,7 +145,6 @@ func (kd *KanikoDocker) BuildImage(options shared.BuildOptions, contextPath, doc
 	if len(registry) >0{
 		err = kd.manuallogin("username","password",registry,false)
 		if err !=nil{
-			fmt.Println("return heeerexxxx")
 			return err
 		}
 	}
@@ -161,7 +160,7 @@ func (kd *KanikoDocker) BuildImage(options shared.BuildOptions, contextPath, doc
 				return err
 			}
 		}
-		fmt.Println("Kaniko build with new dir complete.")
+		fmt.Println("Kaniko build complete.")
 	} else {
 		fmt.Println("Executor is not of type *KanikoExecutor and does not have a Context field.")
 	}
@@ -187,7 +186,7 @@ func (kd *KanikoDocker) manuallogin(username string,password,url string,validate
 			}
 		}
 	}else{
-		fmt.Println("No passowrd or username received!")
+		fmt.Println("No password or username received!")
 	}
 	err := dockerauth.CreateDockerConfigJSON()
 	if err !=nil{
```
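For context on the login path touched above: Kaniko picks up registry credentials from a Docker-style `config.json`, by default under `/kaniko/.docker/`. The project's own `manuallogin` and `dockerauth.CreateDockerConfigJSON` implementations are not shown in this diff, so the following is only a minimal sketch of that general mechanism, with hypothetical registry and credential values:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// dockerConfig mirrors the minimal shape of a Docker config.json that
// Kaniko reads for registry credentials.
type dockerConfig struct {
	Auths map[string]dockerAuth `json:"auths"`
}

type dockerAuth struct {
	Auth string `json:"auth"` // base64("username:password")
}

// writeDockerConfig writes a config.json granting access to a single registry.
// /kaniko/.docker is where the Kaniko executor looks by default.
func writeDockerConfig(registry, username, password, dir string) error {
	cfg := dockerConfig{
		Auths: map[string]dockerAuth{
			registry: {
				Auth: base64.StdEncoding.EncodeToString([]byte(username + ":" + password)),
			},
		},
	}
	data, err := json.MarshalIndent(cfg, "", "  ")
	if err != nil {
		return err
	}
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "config.json"), data, 0o600)
}

func main() {
	// Hypothetical values; in practice these would come from flags, env vars, or a mounted secret.
	if err := writeDockerConfig("registry.example.com", "username", "password", "/kaniko/.docker"); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fmt.Println("Docker config written.")
}
```

The literal `"username"` and `"password"` arguments at the call site in the diff read as placeholders; the sketch above assumes real credentials would be injected rather than hard-coded.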
18 changes: 11 additions & 7 deletions Client/kubernetes/kuberentes.go
```diff
@@ -53,11 +53,12 @@ func StreamJobLogs(namespace, jobName string) error {
 	fmt.Println("Looking for pods created by job...")
 
 	var pods *corev1.PodList
-	retryTimeout := time.After(5 * time.Minute)
+	retryTimeout := time.After(3 * time.Minute)
 	retryTicker := time.NewTicker(2 * time.Second)
 	podFound := false
 
 	for !podFound {
+		fmt.Println("Checking...")
 		select {
 		case <-retryTimeout:
 			return fmt.Errorf("timeout reached while looking for pods for job %s", jobName)
@@ -77,13 +78,12 @@ func StreamJobLogs(namespace, jobName string) error {
 			}
 		}
 	}
-	retryTicker.Stop() // Stop the ticker
-
 	podName := pods.Items[0].Name
 
-	fmt.Println("Waiting for pod to be in Running state...")
+	fmt.Println("Build pod Found, Waiting for Build pod to be in Running or Completed state...")
 	podReady := false
 	for !podReady {
+		fmt.Println("Waiting for status update...")
 		select {
 		case <-retryTimeout:
 			return fmt.Errorf("timeout reached while waiting for pod to be ready for job %s", jobName)
@@ -92,14 +92,18 @@ func StreamJobLogs(namespace, jobName string) error {
 			if err != nil {
 				return fmt.Errorf("failed to get pod %s: %w", podName, err)
 			}
-			if pod.Status.Phase == corev1.PodRunning {
+			// Check if pod is in Running state or has already succeeded
+			if pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded {
 				podReady = true
-				break // Exit the loop once the pod is ready
+				fmt.Printf("Build pod %s is ready (Phase: %s).\n", podName, pod.Status.Phase)
+				break
 			}
-			fmt.Println("Pod is not ready yet, retrying...")
+			fmt.Println("Build pod is not ready yet, retrying...")
 		}
 	}
 
+	retryTicker.Stop() //done with timer
+
 	fmt.Printf("Streaming logs from pod %s...\n", podName)
 	logOptions := &corev1.PodLogOptions{Follow: true}
 	req := clientset.CoreV1().Pods(namespace).GetLogs(podName, logOptions)
```
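Taken together, this change makes the client wait until the build pod is actually Running (or has already Succeeded) before asking the API server for logs, rather than requesting logs from a pod that may still be Pending. The sketch below condenses that pattern into a self-contained client-go function; it is not the project's exact `StreamJobLogs` implementation, and it assumes the standard `job-name` label that the Job controller adds to the pods it creates:

```go
package kubeutil

import (
	"context"
	"fmt"
	"io"
	"os"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// WaitAndStreamJobLogs finds a pod created by jobName, waits until it is
// Running (or already Succeeded), then follows its logs to stdout.
func WaitAndStreamJobLogs(ctx context.Context, clientset kubernetes.Interface, namespace, jobName string) error {
	timeout := time.After(3 * time.Minute)
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	// 1. Look for a pod carrying the job-name label set by the Job controller.
	var podName string
	for podName == "" {
		select {
		case <-timeout:
			return fmt.Errorf("timeout reached while looking for pods for job %s", jobName)
		case <-ticker.C:
			pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
				LabelSelector: "job-name=" + jobName,
			})
			if err != nil {
				return fmt.Errorf("failed to list pods: %w", err)
			}
			if len(pods.Items) > 0 {
				podName = pods.Items[0].Name
			}
		}
	}

	// 2. Wait for the pod to reach Running, or Succeeded if it finished quickly.
	ready := false
	for !ready {
		select {
		case <-timeout:
			return fmt.Errorf("timeout reached while waiting for pod %s to start", podName)
		case <-ticker.C:
			pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				return fmt.Errorf("failed to get pod %s: %w", podName, err)
			}
			ready = pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded
		}
	}

	// 3. Follow the pod's logs until the stream is closed.
	req := clientset.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true})
	logs, err := req.Stream(ctx)
	if err != nil {
		return fmt.Errorf("failed to stream logs for pod %s: %w", podName, err)
	}
	defer logs.Close()

	_, err = io.Copy(os.Stdout, logs)
	return err
}
```

Polling with a ticker keeps the control flow simple; a watch-based approach would react faster to phase changes but needs extra handling for watch expiry and reconnects.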
32 changes: 32 additions & 0 deletions How-To/getting_started.md
@@ -0,0 +1,32 @@
# 🚀 Getting Started with CoreNiko

Welcome to CoreNiko, a powerful tool designed to integrate seamlessly into your Kubernetes cluster and help you manage resources more efficiently. This guide walks you through the essential steps to get CoreNiko up and running in your environment.

### 🔧 Prerequisites
Before diving into the setup, ensure that you have a Kubernetes cluster deployed and ready for action. Familiarity with Docker, Kubernetes, and the concept of RBAC (Role-Based Access Control) will also be beneficial.

### 📦 Installation

To get started with CoreNiko, follow these simple steps:

1. **Download the Latest Release:** Visit our [Releases Page](#) to find the latest version of CoreNiko. Download the appropriate build for your system and prepare to integrate it into your Dockerfile/image as part of your build process. This ensures that CoreNiko is packaged into the container that will be deployed to your Kubernetes environment.

2. **Deploy CoreNiko:** Deployment is straightforward, but CoreNiko needs the correct RBAC permissions to function: it must be able to create, delete, and update resources within the namespace it operates in.

For your convenience, we've provided an example YAML configuration file that outlines the necessary RBAC permissions. Please configure these permissions by applying the `Deploy/deploy.yaml` configuration to your cluster:
```shell
kubectl apply -f Deploy/deploy.yaml
```

### 🛠 Configuration
After installing CoreNiko, you may need to do some additional configuration to tailor it to your specific needs. Configuration options will be made available in the application's documentation.

### 📚 Documentation
For a more in-depth understanding of CoreNiko, including advanced configuration options, usage examples, and troubleshooting tips, please refer to our [Documentation Page](#).

### 🤝 Support
Encountering issues? We're here to help! Reach out to our support team or join the community forum to ask questions, report bugs, or request new features.

---

We're excited to see how CoreNiko will streamline your Kubernetes resource management. Happy deploying! 🚀
