/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package kubernetes

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"os"
	"sync"
	"time"
)

import (
	perrors "github.com/pkg/errors"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

import (
	"github.com/apache/dubbo-go/common/logger"
)

const (
	// kubernetes injects these environment variables into every pod
	podNameKey   = "HOSTNAME"
	nameSpaceKey = "NAMESPACE"
	// annotation key present on every dubbo pod
	DubboIOAnnotationKey = "dubbo.io/annotation"

	DubboIOLabelKey   = "dubbo.io/label"
	DubboIOLabelValue = "dubbo.io-value"
)

var (
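	// ErrDubboLabelAlreadyExist is returned by assembleDUBBOLabel when the
	// current pod already carries the dubbo label.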
	ErrDubboLabelAlreadyExist = perrors.New("dubbo label already exists")
)

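// Client implements the dubbo registry on top of the kubernetes API: records
// for the current instance are written into this pod's dubbo.io/annotation
// value (a base64-encoded JSON array of Objects), and records from other
// instances are discovered by watching every pod that carries the
// dubbo.io/label label.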
type Client struct {

	// kubernetes connection config
	cfg *rest.Config

	// the kubernetes interface
	rawClient kubernetes.Interface

	// current pod config
	currentPodName string

	ns string

	// the memory store
	store Store

	// protects wg and currentPod
	lock sync.Mutex
	// current pod status
	currentPod *v1.Pod
	// protects the maintenanceStatus loop and the watcher
	wg sync.WaitGroup

	// manage the client lifecycle
	ctx    context.Context
	cancel context.CancelFunc
}

// getCurrentPodName loads the current pod name from the HOSTNAME env var
func getCurrentPodName() (string, error) {

	v := os.Getenv(podNameKey)
	if len(v) == 0 {
		return "", perrors.New("environment variable HOSTNAME is not set")
	}
	return v, nil
}

// getCurrentNameSpace loads the current namespace from the NAMESPACE env var
func getCurrentNameSpace() (string, error) {

	v := os.Getenv(nameSpaceKey)
	if len(v) == 0 {
		return "", perrors.New("environment variable NAMESPACE is not set")
	}
	return v, nil
}

// NewMockClient
// exposes newMockClient for registry package tests
func NewMockClient(namespace string, mockClientGenerator func() (kubernetes.Interface, error)) (*Client, error) {
	return newMockClient(namespace, mockClientGenerator)
}

// newMockClient
// creates a client for tests, backed by a generated mock kubernetes interface
func newMockClient(namespace string, mockClientGenerator func() (kubernetes.Interface, error)) (*Client, error) {

	rawClient, err := mockClientGenerator()
	if err != nil {
		return nil, perrors.WithMessage(err, "call mock generator")
	}

	currentPodName, err := getCurrentPodName()
	if err != nil {
		return nil, perrors.WithMessage(err, "get pod name")
	}

	ctx, cancel := context.WithCancel(context.Background())

	c := &Client{
		currentPodName: currentPodName,
		ns:             namespace,
		rawClient:      rawClient,
		ctx:            ctx,
		store:          newStore(ctx),
		cancel:         cancel,
	}

	currentPod, err := c.initCurrentPod()
	if err != nil {
		return nil, perrors.WithMessage(err, "init current pod")
	}

	// record current status
	c.currentPod = currentPod

	// init the store by current pods
	if err := c.initStore(); err != nil {
		return nil, perrors.WithMessage(err, "init store")
	}

	// start kubernetes watch loop
	if err := c.maintenanceStatus(); err != nil {
		return nil, perrors.WithMessage(err, "maintenance the kubernetes status")
	}

	logger.Info("init kubernetes registry success")
	return c, nil
}

// newClient
// creates a client for the registry from the in-cluster config
func newClient(namespace string) (*Client, error) {

	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, perrors.WithMessage(err, "get in-cluster config")
	}

	rawClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, perrors.WithMessage(err, "new kubernetes client from in-cluster config")
	}

	currentPodName, err := getCurrentPodName()
	if err != nil {
		return nil, perrors.WithMessage(err, "get pod name")
	}

	ctx, cancel := context.WithCancel(context.Background())

	c := &Client{
		currentPodName: currentPodName,
		ns:             namespace,
		cfg:            cfg,
		rawClient:      rawClient,
		ctx:            ctx,
		store:          newStore(ctx),
		cancel:         cancel,
	}

	currentPod, err := c.initCurrentPod()
	if err != nil {
		return nil, perrors.WithMessage(err, "init current pod")
	}

	// record current status
	c.currentPod = currentPod

	// init the store by current pods
	if err := c.initStore(); err != nil {
		return nil, perrors.WithMessage(err, "init store")
	}

	// start kubernetes watch loop
	if err := c.maintenanceStatus(); err != nil {
		return nil, perrors.WithMessage(err, "maintenance the kubernetes status")
	}

	logger.Info("init kubernetes registry success")
	return c, nil
}
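
// A minimal usage sketch (illustrative only: the registry key and URL below
// are hypothetical, and the process is assumed to run in-cluster with the
// HOSTNAME/NAMESPACE env vars set and RBAC permission to get/list/watch/patch
// pods):
//
//	c, err := newClient("default")
//	if err != nil {
//		// handle error
//	}
//	defer c.Close()
//
//	// publish this instance under a registry key
//	_ = c.Create("/dubbo/com.example.UserProvider/providers/p1", "dubbo://10.0.0.1:20880/com.example.UserProvider")
//
//	// receive events for every provider of the service
//	events, _ := c.WatchWithPrefix("/dubbo/com.example.UserProvider/providers")
//	for o := range events {
//		// o.Key, o.Value, o.EventType (Create / Update / Delete)
//	}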

// initCurrentPod
// 1. get the current pod
// 2. label it with the dubbo label
func (c *Client) initCurrentPod() (*v1.Pod, error) {

	// read the current pod status
	currentPod, err := c.rawClient.CoreV1().Pods(c.ns).Get(c.currentPodName, metav1.GetOptions{})
	if err != nil {
		return nil, perrors.WithMessagef(err, "get current (%s) pod in namespace (%s)", c.currentPodName, c.ns)
	}

	oldPod, newPod, err := c.assembleDUBBOLabel(currentPod)
	if err != nil {
		if err != ErrDubboLabelAlreadyExist {
			return nil, perrors.WithMessage(err, "assemble dubbo label")
		}
		// the pod already carries the dubbo label; the patch below is a no-op
	}

	p, err := c.getPatch(oldPod, newPod)
	if err != nil {
		return nil, perrors.WithMessage(err, "get patch")
	}

	currentPod, err = c.patchCurrentPod(p)
	if err != nil {
		return nil, perrors.WithMessage(err, "patch to current pod")
	}

	return currentPod, nil
}

// initStore
// 1. list all pods carrying the dubbo label
// 2. put every record into the store
func (c *Client) initStore() error {

	pods, err := c.rawClient.CoreV1().Pods(c.ns).List(metav1.ListOptions{
		LabelSelector: fields.OneTermEqualSelector(DubboIOLabelKey, DubboIOLabelValue).String(),
	})
	if err != nil {
		return perrors.WithMessagef(err, "list pods in namespace (%s)", c.ns)
	}

	for _, pod := range pods.Items {
		logger.Debugf("got the pod (name: %s), (label: %v), (annotations: %v)", pod.Name, pod.GetLabels(), pod.GetAnnotations())
		c.handleWatchedPodEvent(&pod, watch.Added)
	}

	return nil
}

// maintenanceStatus
// verify that pods can be watched, then start the watch loop
func (c *Client) maintenanceStatus() error {

	// probe the watch API once so setup errors are returned synchronously
	watcher, err := c.rawClient.CoreV1().Pods(c.ns).Watch(metav1.ListOptions{
		LabelSelector: fields.OneTermEqualSelector(DubboIOLabelKey, DubboIOLabelValue).String(),
		Watch:         true,
	})
	if err != nil {
		return perrors.WithMessagef(err, "try to watch the namespace (%s) pods", c.ns)
	}

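	// the probe succeeded; stop this watcher, maintenanceStatusLoop creates
	// its own watchers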
	watcher.Stop()

	c.wg.Add(1)
	// add to the WaitGroup so Close can wait for a graceful shutdown
	go c.maintenanceStatusLoop()
	return nil
}

// maintenanceStatusLoop
// watch pods and feed events into the store until the client stops
func (c *Client) maintenanceStatusLoop() {

	defer func() {
		// notify other goroutines that this loop is over
		c.wg.Done()
		logger.Info("maintenanceStatusLoop goroutine exited")
	}()

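	// lastResourceVersion lets a re-created watch resume where the previous
	// one stopped instead of replaying the whole pod list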
	var lastResourceVersion string

	for {

		wc, err := c.rawClient.CoreV1().Pods(c.ns).Watch(metav1.ListOptions{
			LabelSelector:   fields.OneTermEqualSelector(DubboIOLabelKey, DubboIOLabelValue).String(),
			Watch:           true,
			ResourceVersion: lastResourceVersion,
		})
		if err != nil {
			logger.Warnf("watch the namespace (%s) pods: %v, retry after 2 seconds", c.ns, err)
			// check ctx while waiting, otherwise Close could block forever
			// on wg.Wait() if the watch keeps failing
			select {
			case <-c.ctx.Done():
				logger.Info("the kubernetes client stopped")
				return
			case <-time.After(2 * time.Second):
			}
			continue
		}

		logger.Infof("pod watch (re)established, resuming from resource version (%s)", lastResourceVersion)

		select {
		case <-c.ctx.Done():
			// the client stopped
			logger.Info("the kubernetes client stopped")
			return

		default:

			for {
				select {
				// double check ctx
				case <-c.ctx.Done():
					logger.Info("the kubernetes client stopped")
					wc.Stop()
					return

					// get one element from result-chan
				case event, ok := <-wc.ResultChan():
					if !ok {
						wc.Stop()
						logger.Info("kubernetes watch channel closed, creating a new one")
						goto onceWatch
					}

					if event.Type == watch.Error {
						// received an error event
						logger.Warnf("kubernetes watch api report err (%#v)", event)
						continue
					}

					type resourceVersionGetter interface {
						GetResourceVersion() string
					}

					o, ok := event.Object.(resourceVersionGetter)
					if !ok {
						continue
					}

					// record the last resource version to avoid resyncing all pods on reconnect
					lastResourceVersion = o.GetResourceVersion()
					logger.Infof("kubernetes got the current resource version %v", lastResourceVersion)

					// check event object type
					p, ok := event.Object.(*v1.Pod)
					if !ok {
						// not a pod
						continue
					}

					// handle the watched pod
					go c.handleWatchedPodEvent(p, event.Type)
				}
			}
		onceWatch:
		}
	}
}

// handleWatchedPodEvent
// translate a watched pod event into store records
func (c *Client) handleWatchedPodEvent(p *v1.Pod, eventType watch.EventType) {

	for ak, av := range p.GetAnnotations() {

		// skip annotations dubbo is not interested in
		if ak != DubboIOAnnotationKey {
			continue
		}

		ol, err := c.unmarshalRecord(av)
		if err != nil {
			logger.Errorf("the pod has the dubbo annotation, but unmarshaling its value failed: %v", err)
			return
		}

		for _, o := range ol {

			switch eventType {
			case watch.Added:
				// if the pod was just added, the record is always a Create
				o.EventType = Create
			case watch.Modified:
				o.EventType = Update
			case watch.Deleted:
				o.EventType = Delete
			default:
				logger.Errorf("invalid kubernetes event type (%s)", eventType)
				return
			}

			logger.Debugf("prepare to put object (%#v) to kubernetes-store", o)

			if err := c.store.Put(o); err != nil {
				logger.Errorf("put (%#v) to cache store: %v ", o, err)
				return
			}

		}

	}
}

// unmarshalRecord
// unmarshal the kubernetes dubbo annotation value
func (c *Client) unmarshalRecord(record string) ([]*Object, error) {

	if len(record) == 0 {
		// NOTICE:
		// []*Object is nil.
		return nil, nil
	}

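	// the annotation value is URL-safe base64, so the JSON payload is stored
	// as a plain annotation string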
	rawMsg, err := base64.URLEncoding.DecodeString(record)
	if err != nil {
		return nil, perrors.WithMessagef(err, "decode record (%s)", record)
	}

	var out []*Object
	if err := json.Unmarshal(rawMsg, &out); err != nil {
		return nil, perrors.WithMessage(err, "decode json")
	}
	return out, nil
}

// marshalRecord
// marshal the kubernetes dubbo annotation value
func (c *Client) marshalRecord(ol []*Object) (string, error) {

	msg, err := json.Marshal(ol)
	if err != nil {
		return "", perrors.WithMessage(err, "json encode object list")
	}
	return base64.URLEncoding.EncodeToString(msg), nil
}

// readCurrentPod
// read the current pod status from kubernetes api
func (c *Client) readCurrentPod() (*v1.Pod, error) {

	currentPod, err := c.rawClient.CoreV1().Pods(c.ns).Get(c.currentPodName, metav1.GetOptions{})
	if err != nil {
		return nil, perrors.WithMessagef(err, "get current (%s) pod in namespace (%s)", c.currentPodName, c.ns)
	}
	return currentPod, nil
}

// Create
// create k/v pair in storage
func (c *Client) Create(k, v string) error {

	// 1. assemble the new dubbo annotation value from the old pod and (k, v)
	// 2. get patch data
	// 3. PATCH the pod
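	// c.lock serializes the read-modify-write cycle below, so concurrent
	// Create calls in this process cannot clobber each other's annotations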
	c.lock.Lock()
	defer c.lock.Unlock()

	currentPod, err := c.readCurrentPod()
	if err != nil {
		return perrors.WithMessage(err, "read current pod")
	}

	oldPod, newPod, err := c.assembleDUBBOAnnotations(k, v, currentPod)
	if err != nil {
		return perrors.WithMessage(err, "assemble")
	}

	patchBytes, err := c.getPatch(oldPod, newPod)
	if err != nil {
		return perrors.WithMessage(err, "get patch")
	}

	updatedPod, err := c.patchCurrentPod(patchBytes)
	if err != nil {
		return perrors.WithMessage(err, "patch current pod")
	}

	c.currentPod = updatedPod
	// do not update the store here; it is written by maintenanceStatusLoop when the patch is watched back
	return nil
}

// patchCurrentPod
// write new metadata to the current pod
func (c *Client) patchCurrentPod(patch []byte) (*v1.Pod, error) {

	updatedPod, err := c.rawClient.CoreV1().Pods(c.ns).Patch(c.currentPodName, types.StrategicMergePatchType, patch)
	if err != nil {
		return nil, perrors.WithMessage(err, "patch kubernetes pod")
	}
	return updatedPod, nil
}

// assembleDUBBOLabel
// every dubbo instance must carry the {"dubbo.io/label":"dubbo.io-value"} label
func (c *Client) assembleDUBBOLabel(currentPod *v1.Pod) (oldPod *v1.Pod, newPod *v1.Pod, err error) {

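	// oldPod/newPod carry only labels; they exist to feed getPatch, which
	// diffs the two into a minimal patch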
	oldPod = &v1.Pod{}
	newPod = &v1.Pod{}

	oldPod.Labels = make(map[string]string, 8)
	newPod.Labels = make(map[string]string, 8)

	if currentPod.GetLabels() != nil {

		if currentPod.GetLabels()[DubboIOLabelKey] == DubboIOLabelValue {
			// already have label
			err = ErrDubboLabelAlreadyExist
			return
		}
	}

	// copy current pod labels to oldPod && newPod
	for k, v := range currentPod.GetLabels() {
		oldPod.Labels[k] = v
		newPod.Labels[k] = v
	}
	// assign new label for current pod
	newPod.Labels[DubboIOLabelKey] = DubboIOLabelValue
	return
}

// assembleDUBBOAnnotations
// assemble the old-pod and new-pod from the current pod and (k, v)
func (c *Client) assembleDUBBOAnnotations(k, v string, currentPod *v1.Pod) (oldPod *v1.Pod, newPod *v1.Pod, err error) {

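	// as in assembleDUBBOLabel, oldPod/newPod carry only annotations and are
	// diffed by getPatch into a minimal patch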
	oldPod = &v1.Pod{}
	newPod = &v1.Pod{}
	oldPod.Annotations = make(map[string]string, 8)
	newPod.Annotations = make(map[string]string, 8)

	for k, v := range currentPod.GetAnnotations() {
		oldPod.Annotations[k] = v
		newPod.Annotations[k] = v
	}

	al, err := c.unmarshalRecord(oldPod.GetAnnotations()[DubboIOAnnotationKey])
	if err != nil {
		err = perrors.WithMessage(err, "unmarshal record")
		return
	}

	newAnnotations, err := c.marshalRecord(append(al, &Object{Key: k, Value: v}))
	if err != nil {
		err = perrors.WithMessage(err, "marshal record")
		return
	}

	newPod.Annotations[DubboIOAnnotationKey] = newAnnotations
	return
}

// getPatch
// get the kubernetes pod patch bytes
func (c *Client) getPatch(oldPod, newPod *v1.Pod) ([]byte, error) {

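	// CreateTwoWayMergePatch diffs the two serialized pods and emits a
	// strategic-merge patch containing only the changed fields, so the PATCH
	// leaves unrelated pod metadata untouched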
	oldData, err := json.Marshal(oldPod)
	if err != nil {
		return nil, perrors.WithMessage(err, "marshal old pod")
	}

	newData, err := json.Marshal(newPod)
	if err != nil {
		return nil, perrors.WithMessage(err, "marshal new pod")
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Pod{})
	if err != nil {
		return nil, perrors.WithMessage(err, "create two-way-merge-patch")
	}
	return patchBytes, nil
}

// GetChildren
// get the key and value lists of k's children from the kubernetes-store
func (c *Client) GetChildren(k string) ([]string, []string, error) {

	objectList, err := c.store.Get(k, true)
	if err != nil {
		return nil, nil, perrors.WithMessagef(err, "get children from store on (%s)", k)
	}

	var kList []string
	var vList []string

	for _, o := range objectList {
		kList = append(kList, o.Key)
		vList = append(vList, o.Value)
	}

	return kList, vList, nil
}

// Watch
// watch on the specified key
func (c *Client) Watch(k string) (<-chan *Object, error) {

	w, err := c.store.Watch(k, false)
	if err != nil {
		return nil, perrors.WithMessagef(err, "watch on (%s)", k)
	}

	return w.ResultChan(), nil
}

// WatchWithPrefix
// watch on the specified prefix
func (c *Client) WatchWithPrefix(prefix string) (<-chan *Object, error) {

	w, err := c.store.Watch(prefix, true)
	if err != nil {
		return nil, perrors.WithMessagef(err, "watch on prefix (%s)", prefix)
	}

	return w.ResultChan(), nil
}

// Valid
// report whether the client is still alive; false means the client is dead
func (c *Client) Valid() bool {

	select {
	case <-c.Done():
		return false
	default:
		return true
	}
}

// Done
// return a channel that is closed when the client stops
func (c *Client) Done() <-chan struct{} {
	return c.ctx.Done()
}

// Close
// stop the client and wait for the watch loop to exit
func (c *Client) Close() {

	select {
	case <-c.ctx.Done():
		// already stopped
		return
	default:
	}
	c.cancel()

	// canceling the client ctx stops all store watchers,
	// so just wait for them to finish
	c.wg.Wait()
}

// ValidateClient
// validate the container's kubernetes client, creating a new one if it is missing or dead
func ValidateClient(container clientFacade) error {

	lock := container.ClientLock()
	lock.Lock()
	defer lock.Unlock()

	// rebuild the client when it is absent or already dead
	if container.Client() == nil || !container.Client().Valid() {
		ns, err := getCurrentNameSpace()
		if err != nil {
			return perrors.WithMessage(err, "get current namespace")
		}
		newClient, err := newClient(ns)
		if err != nil {
			logger.Warnf("new kubernetes client (namespace{%s}: %v)", ns, err)
			return perrors.WithMessagef(err, "new kubernetes client (:%+v)", ns)
		}
		container.SetClient(newClient)
	}

	return nil
}