Skip to content
This repository was archived by the owner on Mar 26, 2020. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions Gopkg.toml
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,10 @@
name = "github.com/godbus/dbus"
version = "4.1.0"

[[constraint]]
name = "k8s.io/kubernetes"
version = "v1.13.0"

[prune]
go-tests = true
non-go = true
Expand Down
9 changes: 5 additions & 4 deletions glusterd2/volume/fs_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,8 @@ func createSizeInfo(fstat *syscall.Statfs_t) *SizeInfo {

const fuseSuperMagic = 1702057286

func mountVolume(name string, mountpoint string) error {
//MountVolume mounts the gluster volume on a given mount point
func MountVolume(name string, mountpoint string, mntOptns string) error {
// NOTE: Why do it this way ?
// * Libgfapi leaks memory on unmount.
// * Glusterfs volumes cannot be mounted using syscall.Mount()
Expand All @@ -67,8 +68,8 @@ func mountVolume(name string, mountpoint string) error {
buffer.WriteString(fmt.Sprintf(" --volfile-server-port %s", sport))
buffer.WriteString(fmt.Sprintf(" --volfile-id %s", name))
buffer.WriteString(" --log-file /dev/null")
buffer.WriteString(" --read-only ")
buffer.WriteString(mountpoint)
buffer.WriteString(mntOptns)
buffer.WriteString(" " + mountpoint)

args := strings.Fields(buffer.String())
cmd := exec.Command("glusterfs", args...)
Expand All @@ -88,7 +89,7 @@ func UsageInfo(volname string) (*SizeInfo, error) {
}
defer os.Remove(tempDir)

if err := mountVolume(volname, tempDir); err != nil {
if err := MountVolume(volname, tempDir, " --read-only "); err != nil {
return nil, err
}
defer syscall.Unmount(tempDir, syscall.MNT_FORCE)
Expand Down
38 changes: 38 additions & 0 deletions pkg/restclient/block_volume.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
package restclient

import (
"fmt"
"net/http"

"github.com/gluster/glusterd2/plugins/blockvolume/api"
)

// BlockVolumeCreate creates a Gluster Block Volume through the given
// provider. It POSTs the request to the blockvolumes endpoint and
// expects HTTP 201 Created on success.
func (c *Client) BlockVolumeCreate(provider string, req api.BlockVolumeCreateRequest) (api.BlockVolumeCreateResp, error) {
	var vol api.BlockVolumeCreateResp
	// Use an absolute path with a leading slash, consistent with the
	// other BlockVolume* methods; the previous "v1/..." relative form
	// diverged from every sibling endpoint.
	url := fmt.Sprintf("/v1/blockvolumes/%s", provider)
	err := c.post(url, req, http.StatusCreated, &vol)
	return vol, err
}

// BlockVolumeList lists Gluster Block Volumes served by the given
// provider. Expects HTTP 200 OK on success.
func (c *Client) BlockVolumeList(provider string) (api.BlockVolumeListResp, error) {
	//TODO: Are filters required?
	var vols api.BlockVolumeListResp
	err := c.get("/v1/blockvolumes/"+provider, nil, http.StatusOK, &vols)
	return vols, err
}

// BlockVolumeGet fetches info about a single Gluster Block Volume by
// provider and block volume name. Expects HTTP 200 OK on success.
func (c *Client) BlockVolumeGet(provider string, blockVolname string) (api.BlockVolumeGetResp, error) {
	var vol api.BlockVolumeGetResp
	err := c.get("/v1/blockvolumes/"+provider+"/"+blockVolname, nil, http.StatusOK, &vol)
	return vol, err
}

// BlockVolumeDelete deletes the named Gluster Block Volume from the
// given provider. Expects HTTP 204 No Content on success.
func (c *Client) BlockVolumeDelete(provider string, blockVolname string) error {
	return c.del("/v1/blockvolumes/"+provider+"/"+blockVolname, nil, http.StatusNoContent, nil)
}
257 changes: 257 additions & 0 deletions plugins/blockvolume/blockprovider/gluster-loopback/glusterloopback.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,257 @@
package glusterloopback

import (
"context"
"errors"
"fmt"
"io/ioutil"
"os"

"github.com/gluster/glusterd2/glusterd2/volume"
"github.com/gluster/glusterd2/pkg/utils"
"github.com/gluster/glusterd2/plugins/blockvolume/blockprovider"
blkUtils "github.com/gluster/glusterd2/plugins/blockvolume/utils"

log "github.com/sirupsen/logrus"
config "github.com/spf13/viper"
"k8s.io/kubernetes/pkg/util/mount"
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@poornimag do we have Gopkg.toml changes for this one?

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If you are updating the dependencies using `dep ensure`, please make sure the version of dep is v0.5.0.

)

const providerName = "gluster-loopback"

var mounter = mount.New("")

// init registers the gluster-loopback factory with the blockvolume
// plugin's provider registry so it can be selected by providerName.
func init() {
	blockprovider.RegisterBlockProvider(providerName, newGlusterLoopBlk)
}

// GlusterLoopBlk implements the block Provider interface using loopback
// files: each block volume is a plain file created on a regular gluster
// "hosting" volume (see CreateBlockVolume).
type GlusterLoopBlk struct {
	// mounts caches hosting-volume name -> local mount directory so a
	// hosting volume is FUSE-mounted at most once per process.
	// NOTE(review): access is unsynchronized — confirm callers never
	// invoke the provider concurrently, or guard this with a mutex.
	mounts map[string]string
}

// newGlusterLoopBlk constructs a GlusterLoopBlk provider with an empty
// hosting-volume mount cache. It is the factory registered in init.
func newGlusterLoopBlk() (blockprovider.Provider, error) {
	return &GlusterLoopBlk{
		mounts: make(map[string]string),
	}, nil
}

// mountHost returns a local directory where the block hosting volume
// hostVolume is mounted, mounting it under <rundir>/blockvolume/ first
// if necessary. Successful mounts are cached in g.mounts so later calls
// for the same hosting volume are cheap.
func mountHost(g *GlusterLoopBlk, hostVolume string) (string, error) {
	if hostDir := g.mounts[hostVolume]; hostDir != "" {
		return hostDir, nil
	}

	hostDir := config.GetString("rundir") + "/blockvolume/" + hostVolume
	notMnt, err := mounter.IsLikelyNotMountPoint(hostDir)
	if err != nil {
		if !os.IsNotExist(err) {
			// The mount-point check itself failed; nothing has been
			// mounted yet, so say so instead of claiming a mount failure.
			return "", fmt.Errorf("failed to check mount point %s %+v", hostDir, err)
		}
		// The directory does not exist yet: create it, then mount below.
		if err := os.MkdirAll(hostDir, os.ModeDir|os.ModePerm); err != nil {
			return "", fmt.Errorf("failed to create mount point %+v", err)
		}
		notMnt = true
	}

	if notMnt {
		if err := volume.MountVolume(hostVolume, hostDir, ""); err != nil {
			return "", fmt.Errorf("failed to mount block host volume %+v", err)
		}
	}

	g.mounts[hostVolume] = hostDir
	return hostDir, nil
}

// CreateBlockVolume will create a gluster block volume with given name and
// size having `hostVolume` as hosting volume: it creates a sparse file of
// the requested size on the mounted hosting volume and formats it with XFS.
// On success the returned error may still be non-nil if only the hosting
// volume's available-size metadata update failed (the block volume itself
// exists in that case) — NOTE(review): confirm callers expect this.
func (g *GlusterLoopBlk) CreateBlockVolume(name string, size uint64, hostVolume string, options ...blockprovider.BlockVolOption) (blockprovider.BlockVolume, error) {
	blockVolOpts := &blockprovider.BlockVolumeOptions{}
	blockVolOpts.ApplyOpts(options...)

	logger := log.WithFields(log.Fields{
		"block_name":           name,
		"hostvol":              hostVolume,
		"requested_block_size": size,
	})

	hostDir, err := mountHost(g, hostVolume)
	if err != nil {
		return nil, fmt.Errorf("failed to mount block hosting volume %+v", err)
	}

	blockFileName := hostDir + "/" + name

	// Pass "-s" and the size as separate argv entries. The previous
	// single "-s <n>" token only worked because getopt tolerates glued
	// short-option values (and strtol skips the leading space).
	err = utils.ExecuteCommandRun("truncate", "-s", fmt.Sprintf("%d", size), blockFileName) //nolint: gosec
	if err != nil {
		return nil, fmt.Errorf("failed to truncate block file %s: %+v", blockFileName, err)
	}

	err = utils.ExecuteCommandRun("mkfs.xfs", "-f", blockFileName) //nolint: gosec
	if err != nil {
		return nil, fmt.Errorf("failed to format block file %s: %+v", blockFileName, err)
	}

	// Deduct this block volume's size from the hosting volume's
	// _block-hosting-available-size metadata. Failure here is logged and
	// the error is propagated alongside the (created) volume.
	resizeFunc := func(blockHostingAvailableSize, blockSize uint64) uint64 { return blockHostingAvailableSize - blockSize }
	if err = blkUtils.ResizeBlockHostingVolume(hostVolume, size, resizeFunc); err != nil {
		logger.WithError(err).Error("failed in updating hostvolume _block-hosting-available-size metadata")
	}

	return &BlockVolume{
		hostVolume: hostVolume,
		name:       name,
		size:       size,
	}, err
}

// DeleteBlockVolume deletes the gluster block volume with the given name:
// it locates the hosting volume, removes the backing file, and returns the
// freed size to the hosting volume's available-size metadata.
func (g *GlusterLoopBlk) DeleteBlockVolume(name string, options ...blockprovider.BlockVolOption) error {
	blockVolOpts := &blockprovider.BlockVolumeOptions{}
	blockVolOpts.ApplyOpts(options...)

	// TODO: Listing all the block volumes to delete one block vol will bottleneck at scale. Possible options:
	// - Let block delete carry the host volume(optionally). The caller needs to keep this info returned in create vol, and send it in delete req.
	// - Build a map in memory ([blockvolume]hostvolume)during init(or lazy) during init of provider/create of block volume
	var hostVol string
	for _, bv := range g.BlockVolumes() {
		if bv.Name() == name {
			hostVol = bv.HostVolume()
			break
		}
	}
	if hostVol == "" {
		return errors.New("block volume not found")
	}

	hostDir, err := mountHost(g, hostVol)
	if err != nil {
		return err
	}

	blockFileName := hostDir + "/" + name
	// Stat before removing so the freed size can be returned to the
	// hosting volume's accounting below.
	stat, err := os.Stat(blockFileName)
	if err != nil {
		return err
	}
	if err := os.Remove(blockFileName); err != nil {
		return err
	}

	size := stat.Size()
	grow := func(blockHostingAvailableSize, blockSize uint64) uint64 { return blockHostingAvailableSize + blockSize }
	err = blkUtils.ResizeBlockHostingVolume(hostVol, size, grow)
	if err != nil {
		log.WithFields(log.Fields{
			"error": err,
			"size":  size,
		}).Error("error in resizing the block hosting volume")
	}

	return err
}

// GetBlockVolume returns info about the gluster block volume with the
// given name, or an error if no such block volume exists.
func (g *GlusterLoopBlk) GetBlockVolume(name string) (blockprovider.BlockVolume, error) {
	//TODO: looping through all block volumes to get one block vol info is not scalable, fix it
	for _, bv := range g.BlockVolumes() {
		if bv.Name() != name {
			continue
		}
		return &BlockVolume{
			name:       bv.Name(),
			hostVolume: bv.HostVolume(),
			size:       bv.Size(),
		}, nil
	}
	return nil, errors.New("block volume not found")
}

// BlockVolumes returns all available gluster block volumes by listing the
// files on every block-hosting volume. This is best-effort: any failure
// (volume listing, mount, or directory read) returns whatever has been
// collected so far rather than an error.
func (g *GlusterLoopBlk) BlockVolumes() []blockprovider.BlockVolume {
	blockVols := []blockprovider.BlockVolume{}

	vols, err := volume.GetVolumes(context.Background())
	if err != nil {
		return blockVols
	}

	for _, hostVol := range volume.ApplyFilters(vols, volume.BlockHosted) {
		hostDir, err := mountHost(g, hostVol.Name)
		if err != nil {
			return blockVols
		}

		entries, err := ioutil.ReadDir(hostDir)
		if err != nil {
			return blockVols
		}

		// Every file on a hosting volume is treated as one block volume;
		// its on-disk size is the block volume size.
		for _, entry := range entries {
			blockVols = append(blockVols, &BlockVolume{
				name:       entry.Name(),
				hostVolume: hostVol.Name,
				size:       uint64(entry.Size()),
			})
		}
	}

	return blockVols
}

// ProviderName returns the name under which this block provider is
// registered ("gluster-loopback").
func (g *GlusterLoopBlk) ProviderName() string {
	return providerName
}

// BlockVolume implements blockprovider.BlockVolume interface.
// It holds information about a gluster-block volume.
type BlockVolume struct {
	hosts      []string // gluster host addresses; not populated anywhere in this file — see HostAddresses
	hostVolume string   // name of the gluster volume hosting the block file
	name       string   // block volume (backing file) name
	size       uint64   // block volume size in bytes
}

// HostAddresses returns host addresses of a gluster block vol.
// NOTE(review): nothing in this file ever sets the hosts field, so this
// returns nil in the visible code — confirm whether it is needed.
func (gv *BlockVolume) HostAddresses() []string { return gv.hosts }
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These functions are also not needed.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Then it would complain that the interface methods are not implemented? How else to do this?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

then IMO this will create an issue if we add different input structures for different provisioners, isn't it?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes. So should we have kept the structures different for each provider? The current structure is a union of all the fields of each provider, and the caller ends up calling these functions anyway, as the block volume request structure has these fields. How should we go about this?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we need to refactor the code to achieve this.

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Required for this patch?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we can skip this cleanup, please create an issue to do the cleanup. so that we won't miss it

Copy link

@oshankkumar oshankkumar Jan 14, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Madhu-1 we can take this task to define a better interface for BlockVolume to support all block providers in some other patch, for now this should be good to go.

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@Madhu-1 sure I'll do that


// IQN returns IQN of a gluster block vol. Always the empty string for
// this provider (interface stub).
func (gv *BlockVolume) IQN() string { return "" }

// Username returns username of a gluster-block vol. Always the empty
// string for this provider (interface stub).
func (gv *BlockVolume) Username() string { return "" }

// Password returns password for a gluster block vol. Always the empty
// string for this provider (interface stub).
func (gv *BlockVolume) Password() string { return "" }

// HostVolume returns the name of the gluster volume hosting this block
// volume's backing file.
func (gv *BlockVolume) HostVolume() string { return gv.hostVolume }

// Name returns the name of the gluster block vol (the backing file name).
func (gv *BlockVolume) Name() string { return gv.name }

// Size returns size of a gluster block vol in bytes.
func (gv *BlockVolume) Size() uint64 { return gv.size }

// ID returns Gluster Block ID. Always the empty string for this
// provider (interface stub).
func (gv *BlockVolume) ID() string { return "" }

// HaCount returns the high availability count. Always 0 for this
// provider (interface stub).
func (gv *BlockVolume) HaCount() int { return 0 }
1 change: 1 addition & 0 deletions plugins/blockvolume/init.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,5 @@ package blockvolume
import (
// initialise all block providers
_ "github.com/gluster/glusterd2/plugins/blockvolume/blockprovider/gluster-block"
_ "github.com/gluster/glusterd2/plugins/blockvolume/blockprovider/gluster-loopback"
)