Add error checks per report from errcheck-ng

This commit is contained in:
Oleksii Kliukin 2017-06-07 18:58:52 +02:00 committed by Oleksii Kliukin
parent 292a9bda05
commit bc0e9ab4bc
9 changed files with 48 additions and 19 deletions

View File

@ -532,7 +532,9 @@ func (c *Cluster) Delete() error {
// ReceivePodEvent is called back by the controller in order to add the cluster's pod event to the queue.
func (c *Cluster) ReceivePodEvent(event spec.PodEvent) {
c.podEventsQueue.Add(event)
if err := c.podEventsQueue.Add(event); err != nil {
c.logger.Errorf("error when receiving pod events: %v", err)
}
}
func (c *Cluster) processPodEvent(obj interface{}) error {
@ -562,7 +564,9 @@ func (c *Cluster) processPodEventQueue(stopCh <-chan struct{}) {
case <-stopCh:
return
default:
c.podEventsQueue.Pop(cache.PopProcessFunc(c.processPodEvent))
if _, err := c.podEventsQueue.Pop(cache.PopProcessFunc(c.processPodEvent)); err != nil {
c.logger.Errorf("error when processing pod event queue: %v", err)
}
}
}
}

View File

@ -37,10 +37,7 @@ func (c *Cluster) resizePostgresFilesystem(podName *spec.NamespacedName, resizer
return c.ExecCommand(podName, "bash", "-c", cmd)
})
if err != nil {
return err
}
return nil
}
return fmt.Errorf("could not resize filesystem: no compatible resizers for the filesystem of type %s", fsType)
}

View File

@ -48,7 +48,9 @@ func (c *Cluster) initDbConn() (err error) {
}
err = conn.Ping()
if err != nil {
conn.Close()
if err2 := conn.Close(); err2 != nil {
c.logger.Errorf("error when closing PostgreSQL connection after another error: %v", err2)
}
return err
}
@ -64,7 +66,12 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUser
if rows, err = c.pgDb.Query(getUserSQL, pq.Array(userNames)); err != nil {
return nil, fmt.Errorf("error when querying users: %v", err)
}
defer rows.Close()
defer func() {
if err2 := rows.Close(); err2 != nil {
err = fmt.Errorf("error when closing query cursor: %v", err2)
}
}()
for rows.Next() {
var (
rolname, rolpassword string

View File

@ -108,7 +108,11 @@ func (c *Cluster) resizeVolumes(newVolume spec.Volume, resizers []volumes.Volume
if err != nil {
return fmt.Errorf("could not connect to the volume provider: %v", err)
}
defer resizer.DisconnectFromProvider()
defer func() {
if err2 := resizer.DisconnectFromProvider(); err2 != nil {
c.logger.Errorf("%v", err2)
}
}()
}
awsVolumeId, err := resizer.GetProviderVolumeID(pv)
if err != nil {

View File

@ -96,11 +96,13 @@ func (c *Controller) initController() {
c.opConfig.ResyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
c.postgresqlInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
if err := c.postgresqlInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.postgresqlAdd,
UpdateFunc: c.postgresqlUpdate,
DeleteFunc: c.postgresqlDelete,
})
}); err != nil {
c.logger.Fatalf("could not add event handlers: %v", err)
}
// Pods
podLw := &cache.ListWatch{
@ -114,11 +116,13 @@ func (c *Controller) initController() {
c.opConfig.ResyncPeriodPod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
c.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
if err := c.podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.podAdd,
UpdateFunc: c.podUpdate,
DeleteFunc: c.podDelete,
})
}); err != nil {
c.logger.Fatalf("could not add event handlers: %v", err)
}
c.clusterEventQueues = make([]*cache.FIFO, c.opConfig.Workers)
for i := range c.clusterEventQueues {

View File

@ -183,7 +183,9 @@ func (c *Controller) processEvent(obj interface{}) error {
func (c *Controller) processClusterEventsQueue(idx int) {
for {
c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(c.processEvent))
if _, err := c.clusterEventQueues[idx].Pop(cache.PopProcessFunc(c.processEvent)); err != nil {
c.logger.Errorf("error when processing cluster events queue: %v", err)
}
}
}
@ -224,7 +226,9 @@ func (c *Controller) queueClusterEvent(old, new *spec.Postgresql, eventType spec
}
//TODO: if we delete cluster, discard all the previous events for the cluster
c.clusterEventQueues[workerID].Add(clusterEvent)
if err := c.clusterEventQueues[workerID].Add(clusterEvent); err != nil {
c.logger.WithField("worker", workerID).Errorf("error when queueing cluster event: %v", err)
}
c.logger.WithField("worker", workerID).Infof("%s of the '%s' cluster has been queued", eventType, clusterName)
}

View File

@ -56,7 +56,9 @@ func KubernetesRestClient(c *rest.Config) (*rest.RESTClient, error) {
)
return nil
})
schemeBuilder.AddToScheme(api.Scheme)
if err := schemeBuilder.AddToScheme(api.Scheme); err != nil {
return nil, fmt.Errorf("could not apply functions to register PostgreSQL TPR type: %v", err)
}
return rest.RESTClientFor(c)
}

View File

@ -58,7 +58,7 @@ func NewTeamsAPI(url string, log *logrus.Logger) *API {
}
// TeamInfo returns information about a given team using its ID and a token to authenticate to the API service.
func (t *API) TeamInfo(teamID, token string) (*team, error) {
func (t *API) TeamInfo(teamID, token string) (tm *team, er error) {
url := fmt.Sprintf("%s/teams/%s", t.url, teamID)
t.logger.Debugf("Request url: %s", url)
req, err := http.NewRequest("GET", url, nil)
@ -71,7 +71,12 @@ func (t *API) TeamInfo(teamID, token string) (*team, error) {
if err != nil {
return nil, err
}
defer resp.Body.Close()
defer func() {
if err := resp.Body.Close(); err != nil {
er = fmt.Errorf("error when closing response: %v", err)
tm = nil
}
}()
if resp.StatusCode != 200 {
var raw map[string]json.RawMessage
d := json.NewDecoder(resp.Body)

View File

@ -148,7 +148,9 @@ func TestInfo(t *testing.T) {
t.Errorf("Authorization token is wrong or not provided")
}
w.WriteHeader(tc.inCode)
fmt.Fprint(w, tc.in)
if _, err := fmt.Fprint(w, tc.in); err != nil {
t.Errorf("Error writing teams api response %v", err)
}
}))
defer ts.Close()
api := NewTeamsAPI(ts.URL, logger)