Error message too long and confusing #38

Open
kononovn opened this issue Jul 1, 2019 · 1 comment

@kononovn

kononovn commented Jul 1, 2019

Reproduction:
Create VM-A based on the following yaml:

```yaml
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine
metadata:
  name: vma
  namespace: myproject
spec:
  running: false
  template:
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
          interfaces:
            - name: default
              masquerade: {}
            - name: br1
              macAddress: 02:54:00:6a:9a:15 
              bridge: {}
        resources:
          requests:
            memory: 1G      
        cpu:
          cores: 2
      volumes:
        - name: containerdisk
          containerDisk:
            image: kubevirt/fedora-cloud-container-disk-demo:latest
        - name: cloudinitdisk
          cloudInitNoCloud:
            userData: |-
              #cloud-config
              password: fedora
              chpasswd: { expire: False }
              runcmd:
                - sudo nmcli con mod 'Wired connection 1' ipv4.address 10.201.0.1/24
                - sudo nmcli con mod 'Wired connection 1' ipv4.method manual
                - sudo nmcli con up 'Wired connection 1'
      networks:
        - name: default
          pod: {}
        - multus:
            networkName: br1
          name: br1
      nodeSelector: 
         kubernetes.io/hostname: working-8v662-worker-0-rdjsp
```

Add a new interface with the same MAC address to the yaml, as below:

```yaml
apiVersion: kubevirt.io/v1alpha3
kind: VirtualMachine
metadata:
  name: vma
  namespace: myproject
spec:
  running: false
  template:
    spec:
      domain:
        devices:
          disks:
            - name: containerdisk
              disk:
                bus: virtio
            - name: cloudinitdisk
              disk:
                bus: virtio
          interfaces:
            - name: default
              masquerade: {}
            - name: br1
              macAddress: 02:54:00:6a:9a:15 
              bridge: {}
            - name: br2
              macAddress: 02:54:00:6a:9a:15
              bridge: {}
        resources:
          requests:
            memory: 1G      
        cpu:
          cores: 2
      volumes:
        - name: containerdisk
          containerDisk:
            image: kubevirt/fedora-cloud-container-disk-demo:latest
        - name: cloudinitdisk
          cloudInitNoCloud:
            userData: |-
              #cloud-config
              password: fedora
              chpasswd: { expire: False }
              runcmd:
                - sudo nmcli con mod 'Wired connection 1' ipv4.address 10.201.0.1/24
                - sudo nmcli con mod 'Wired connection 1' ipv4.method manual
                - sudo nmcli con up 'Wired connection 1'
      networks:
        - name: default
          pod: {}
        - multus:
            networkName: br1
          name: br1
        - multus:
            networkName: br2
          name: br1
      nodeSelector: 
         kubernetes.io/hostname: working-8v662-worker-0-rdjsp
```

Try to reapply this yaml:
oc apply -f vm_a_temp.yaml
BUG: you will receive the following error:
```
Error from server (InternalError): error when applying patch:
{"metadata":{"annotations":{"kubectl.kubernetes.io/last-applied-configuration":"{"apiVersion":"kubevirt.io/v1alpha3","kind":"VirtualMachine","metadata":{"annotations":{},"name":"vma","namespace":"myproject"},"spec":{"running":false,"template":{"spec":{"domain":{"cpu":{"cores":2},"devices":{"disks":[{"disk":{"bus":"virtio"},"name":"containerdisk"},{"disk":{"bus":"virtio"},"name":"cloudinitdisk"}],"interfaces":[{"masquerade":{},"name":"default"},{"bridge":{},"macAddress":"02:54:00:6a:9a:15","name":"br1"},{"bridge":{},"macAddress":"02:54:00:6a:9a:15","name":"br2"}]},"resources":{"requests":{"memory":"1G"}}},"networks":[{"name":"default","pod":{}},{"multus":{"networkName":"br1"},"name":"br1"},{"multus":{"networkName":"br2"},"name":"br1"}],"nodeSelector":{"kubernetes.io/hostname":"working-8v662-worker-0-rdjsp"},"volumes":[{"containerDisk":{"image":"kubevirt/fedora-cloud-container-disk-demo:latest"},"name":"containerdisk"},{"cloudInitNoCloud":{"userData":"#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nruncmd:\n - sudo nmcli con mod 'Wired connection 1' ipv4.address 10.201.0.1/24\n - sudo nmcli con mod 'Wired connection 1' ipv4.method manual\n - sudo nmcli con up 'Wired connection 1'"},"name":"cloudinitdisk"}]}}}}\n"}},"spec":{"template":{"spec":{"domain":{"devices":{"interfaces":[{"masquerade":{},"name":"default"},{"bridge":{},"macAddress":"02:54:00:6a:9a:15","name":"br1"},{"bridge":{},"macAddress":"02:54:00:6a:9a:15","name":"br2"}]}}}}}}
to:
Resource: "kubevirt.io/v1alpha3, Resource=virtualmachines", GroupVersionKind: "kubevirt.io/v1alpha3, Kind=VirtualMachine"
Name: "vma", Namespace: "myproject"
Object: &{map["apiVersion":"kubevirt.io/v1alpha3" "kind":"VirtualMachine" "metadata":map["resourceVersion":"5431264" "selfLink":"/apis/kubevirt.io/v1alpha3/namespaces/myproject/virtualmachines/vma" "uid":"116f95c0-9bdd-11e9-a14f-52fdfc072182" "annotations":map["kubectl.kubernetes.io/last-applied-configuration":"{"apiVersion":"kubevirt.io/v1alpha3","kind":"VirtualMachine","metadata":{"annotations":{},"name":"vma","namespace":"myproject"},"spec":{"running":false,"template":{"spec":{"domain":{"cpu":{"cores":2},"devices":{"disks":[{"disk":{"bus":"virtio"},"name":"containerdisk"},{"disk":{"bus":"virtio"},"name":"cloudinitdisk"}],"interfaces":[{"masquerade":{},"name":"default"},{"bridge":{},"macAddress":"02:54:00:6a:9a:15","name":"br1"}]},"resources":{"requests":{"memory":"1G"}}},"networks":[{"name":"default","pod":{}},{"multus":{"networkName":"br1"},"name":"br1"},{"multus":{"networkName":"br2"},"name":"br1"}],"nodeSelector":{"kubernetes.io/hostname":"working-8v662-worker-0-rdjsp"},"volumes":[{"containerDisk":{"image":"kubevirt/fedora-cloud-container-disk-demo:latest"},"name":"containerdisk"},{"cloudInitNoCloud":{"userData":"#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nruncmd:\n - sudo nmcli con mod 'Wired connection 1' ipv4.address 10.201.0.1/24\n - sudo nmcli con mod 'Wired connection 1' ipv4.method manual\n - sudo nmcli con up 'Wired connection 1'"},"name":"cloudinitdisk"}]}}}}\n"] "creationTimestamp":"2019-07-01T08:48:59Z" "generation":'\x01' "name":"vma" "namespace":"myproject"] "spec":map["running":%!q(bool=false) "template":map["spec":map["networks":[map["name":"default" "pod":map[]] map["multus":map["networkName":"br1"] "name":"br1"] map["multus":map["networkName":"br2"] "name":"br1"]] "nodeSelector":map["kubernetes.io/hostname":"working-8v662-worker-0-rdjsp"] "volumes":[map["containerDisk":map["image":"kubevirt/fedora-cloud-container-disk-demo:latest"] "name":"containerdisk"] map["cloudInitNoCloud":map["userData":"#cloud-config\npassword: fedora\nchpasswd: { expire: False }\nruncmd:\n - sudo nmcli con mod 'Wired connection 1' ipv4.address 10.201.0.1/24\n - sudo nmcli con mod 'Wired connection 1' ipv4.method manual\n - sudo nmcli con up 'Wired connection 1'"] "name":"cloudinitdisk"]] "domain":map["cpu":map["cores":'\x02'] "devices":map["disks":[map["disk":map["bus":"virtio"] "name":"containerdisk"] map["name":"cloudinitdisk" "disk":map["bus":"virtio"]]] "interfaces":[map["macAddress":"02:ff:fb:00:00:07" "masquerade":map[] "name":"default"] map["bridge":map[] "macAddress":"02:54:00:6a:9a:15" "name":"br1"]]] "resources":map["requests":map["memory":"1G"]]]]]]]}
for: "vm_a_temp.yaml": Internal error occurred: admission webhook "mutatevirtualmachines.example.com" denied the request: Failed to update virtual machine allocation error: failed to allocate requested mac address
```

@SchSeba
Collaborator

SchSeba commented Jul 1, 2019

I took a look at this issue; the error looks like this because it's an update request: when a patch fails, `oc apply` prints the patch it tried to apply and the live object alongside the webhook's denial message.
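
To make that concrete, here is an illustrative Go sketch of how the final client-side message gets layered together (reconstructed from the error text above, not taken from the actual kubectl/apiserver sources):

```go
package main

import "fmt"

func main() {
	// The only string kubemacpool controls: the message it attaches when
	// denying the admission request.
	webhookMsg := "Failed to update virtual machine allocation error: " +
		"failed to allocate requested mac address"

	// The API server wraps that message with the webhook's name.
	serverErr := fmt.Sprintf(
		"Internal error occurred: admission webhook %q denied the request: %s",
		"mutatevirtualmachines.example.com", webhookMsg)

	// On a failed update, oc/kubectl apply also prints the patch it tried
	// to apply and the live object (elided here as {...}), which is what
	// makes the real error so long.
	fmt.Println(`Error from server (InternalError): error when applying patch: {...} to: {...} for: "vm_a_temp.yaml": ` + serverErr)
}
```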

The only section kubemacpool can change is the text after `denied the request:`. For example:

denied the request: Failed to update virtual machine allocation error: failed to allocate requested mac address

So I think we can't change this for now.
I will take a look at the controller-runtime package to see whether the error response can be shaped there.
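
For reference, a minimal sketch of what such a handler looks like with controller-runtime's admission package (this is not kubemacpool's actual code; the pool interface and the MAC value are placeholders):

```go
package webhook

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// macPool stands in for kubemacpool's pool manager (hypothetical interface).
type macPool interface {
	Allocate(mac string) error
}

type vmMutator struct {
	pool macPool
}

// Handle processes an admission request for a VirtualMachine update.
// The string passed to admission.Denied below is the only part of the
// client-side error the webhook controls; the prefix and the patch dump
// are added by the API server and oc/kubectl.
func (m *vmMutator) Handle(ctx context.Context, req admission.Request) admission.Response {
	// ...decode the VirtualMachine from req and collect the requested MAC
	// addresses (omitted for brevity)...
	requestedMAC := "02:54:00:6a:9a:15" // placeholder value from the report

	if err := m.pool.Allocate(requestedMAC); err != nil {
		return admission.Denied(fmt.Sprintf(
			"Failed to update virtual machine allocation error: %v", err))
	}
	return admission.Allowed("")
}
```

Whatever message kubemacpool returns there, the surrounding patch dump comes from the client side, so trimming it would have to happen in oc/kubectl or the API server rather than in this repo.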
