CSV data manipulation: Difference between revisions

From Rosetta Code

Revision as of 23:34, 19 March 2014

Task
CSV data manipulation
You are encouraged to solve this task according to the task description, using any language you may know.

CSV spreadsheet files are suitable for storing tabular data in a relatively portable way. The CSV format is flexible but somewhat ill-defined. For present purposes, authors may assume that the data fields contain no commas, backslashes, or quotation marks.

The task here is to read a CSV file, change some values and save the changes back to a file. For this task we will use the following CSV file:

C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20

Suggestions

  • Show how to add a column, headed 'SUM', of the sums of the rows.
  • If possible, illustrate the use of built-in or standard functions, methods, or libraries, that handle generic CSV files.

AutoHotkey

<lang AutoHotkey>Loop, Read, Data.csv
{
   i := A_Index
   Loop, Parse, A_LoopReadLine, CSV
       Output .= (i=A_Index && i!=1 ? A_LoopField**2 : A_LoopField) (A_Index=5 ? "`n" : ",")
}
FileAppend, %Output%, NewData.csv</lang>
Output:

C1,C2,C3,C4,C5
1,25,9,13,17
2,6,100,14,18
3,7,11,225,19
4,8,12,16,400

AWK

Adds a column sum to a csv table.
<lang AWK>#!/usr/bin/awk -f
BEGIN { FS=","; }
{
    if (NR==1) {
        print $0""FS"SUM";
    } else {
        s = 0;
        for (k=1; k<=NF; k++) { s += $k; }
        print $0""FS""s;
    }
}</lang>

awk -f csv_data_manipulation.awk data.csv 
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

C++

<lang cpp>#include <map>
#include <vector>
#include <iostream>
#include <fstream>
#include <utility>
#include <functional>
#include <string>
#include <sstream>
#include <algorithm>
#include <cctype>

class CSV {
public:

   CSV(void) : m_nCols( 0 ), m_nRows( 0 )
   {}
   bool open( const char* filename, char delim = ',' )
   {
       std::ifstream file( filename );
   
       clear();
       if ( file.is_open() )
       {
           open( file, delim );
           return true;
       }
       return false;
   }
   void open( std::istream& istream, char delim = ',' )
   {
       std::string         line;
       clear();
       while ( std::getline( istream, line ) )
       {
           unsigned int nCol = 0;
           std::istringstream    lineStream(line);
           std::string           cell;
           while( std::getline( lineStream, cell, delim ) )
           {
               m_oData[std::make_pair( nCol, m_nRows )] = trim( cell );
               nCol++;
           }
           m_nCols = std::max( m_nCols, nCol );
           m_nRows++;
       }
   }
   bool save( const char* pFile, char delim = ',' )
   {
       std::ofstream ofile( pFile );
       if ( ofile.is_open() )
       {
            save( ofile, delim );
           return true;
       }
       return false;
   }
   void save( std::ostream& ostream, char delim = ',' )
   {
       for ( unsigned int nRow = 0; nRow < m_nRows; nRow++ )
       {
           for ( unsigned int nCol = 0; nCol < m_nCols; nCol++ )
           {
               ostream << trim( m_oData[std::make_pair( nCol, nRow )] );
               if ( (nCol+1) < m_nCols )
               {
                   ostream << delim;
               }
               else
               {
                   ostream << std::endl;
               }
           }
       }
   }
   void clear()
   {
       m_oData.clear();
       m_nRows = m_nCols = 0;
   }
   std::string& operator()( unsigned int nCol, unsigned int nRow )
   {
       m_nCols = std::max( m_nCols, nCol+1 );
       m_nRows = std::max( m_nRows, nRow+1 );
       return m_oData[std::make_pair(nCol, nRow)];
   }
   inline unsigned int GetRows() { return m_nRows; }
   inline unsigned int GetCols() { return m_nCols; }

private:

    // trim string for empty spaces at the beginning and at the end
   inline std::string &trim(std::string &s) 
   {
       
       s.erase(s.begin(), std::find_if(s.begin(), s.end(), std::not1(std::ptr_fun<int, int>(std::isspace))));
       s.erase(std::find_if(s.rbegin(), s.rend(), std::not1(std::ptr_fun<int, int>(std::isspace))).base(), s.end());
       return s;
   }

private:

   std::map<std::pair<unsigned int, unsigned int>, std::string> m_oData;
   unsigned int    m_nCols;
   unsigned int    m_nRows;

};


int main() {

   CSV oCSV;
   oCSV.open( "test_in.csv" );
   oCSV( 0, 0 ) = "Column0";
   oCSV( 1, 1 ) = "100";
   oCSV( 2, 2 ) = "200";
   oCSV( 3, 3 ) = "300";
   oCSV( 4, 4 ) = "400";
   oCSV.save( "test_out.csv" );
   return 0;

}</lang>

Output (in test_out.csv):
Column0,C2,C3,C4,C5
1,100,9,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

C#

<lang csharp>using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;

namespace CSV {

   class CSV
   {
       private Dictionary<Tuple<int, int>, string> _data;
       private int _rows;
       private int _cols;
       public int Rows { get { return _rows; } }
       public int Cols { get { return _cols; } }
       public CSV()
       {
           Clear();
       }
       public void Clear()
       {
           _rows = 0;
           _cols = 0;
            _data = new Dictionary<Tuple<int, int>, string>();
       }
       public void Open(StreamReader stream, char delim = ',')
       {
           string line;
           int col = 0;
           int row = 0;
           Clear();
            while ((line = stream.ReadLine()) != null)
           {
               if (line.Length > 0)
               {
                   string[] values = line.Split(delim);
                   col = 0;
                   foreach (var value in values)
                   {
                       this[col,row] = value;
                       col++;
                   }
                   row++;
               }
           }
           stream.Close();
       }
       public void Save(StreamWriter stream, char delim = ',')
       {
           for (int row = 0; row < _rows; row++)
           {
               for (int col = 0; col < _cols; col++)
               {
                   stream.Write(this[col, row]);
                   if (col < _cols - 1)
                   {
                       stream.Write(delim);
                   }
               }
               stream.WriteLine();
           }
           stream.Flush();
           stream.Close();
       }
       public string this[int col, int row]
       {
           get
           {
               try
               {
                    return _data[new Tuple<int, int>(col, row)];
               }
               catch
               {
                   return "";
               }
           }
           set
           {
                _data[new Tuple<int, int>(col, row)] = value.ToString().Trim();
               _rows = Math.Max(_rows, row + 1);
               _cols = Math.Max(_cols, col + 1);
           }
       }
       static void Main(string[] args)
       {
            CSV csv = new CSV();
            csv.Open(new StreamReader(@"test_in.csv"));
           csv[0, 0] = "Column0";
           csv[1, 1] = "100";
           csv[2, 2] = "200";
           csv[3, 3] = "300";
           csv[4, 4] = "400";
            csv.Save(new StreamWriter(@"test_out.csv"));
       }
   }

}</lang>

Output (in test_out.csv):
Column0,C2,C3,C4,C5
1,100,9,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

D

<lang d>void main() {

   import std.stdio, std.csv, std.file, std.typecons, std.array,
          std.algorithm, std.conv, std.range;
   auto rows = "csv_data_in.csv".File.byLine;
   auto fout = "csv_data_out.csv".File("w");
   fout.writeln(rows.front);
   fout.writef("%(%(%d,%)\n%)", rows.dropOne
               .map!(r => r.csvReader!int.front.map!(x => x + 1)));

}</lang>

Output (in csv_data_out.csv):
C1,C2,C3,C4,C5
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20
5,9,13,17,21

ECL

<lang>// Assumes a CSV file exists and has been sprayed to a Thor cluster
MyFileLayout := RECORD
STRING Field1;
STRING Field2;
STRING Field3;
STRING Field4;
STRING Field5;
END;

MyDataset := DATASET ('~Rosetta::myCSVFile', MyFileLayout,CSV(SEPARATOR(',')));

MyFileLayout Appended(MyFileLayout pInput):= TRANSFORM

 SELF.Field1 := pInput.Field1 +'x';
 SELF.Field2 := pInput.Field2 +'y';
 SELF.Field3 := pInput.Field3 +'z';
 SELF.Field4 := pInput.Field4 +'a';
 SELF.Field5 := pInput.Field5 +'b';

END ;

MyNewDataset := PROJECT(MyDataset,Appended(LEFT));
OUTPUT(myNewDataset,,'~Rosetta::myNewCSVFile',CSV,OVERWRITE);</lang>
Output (contents of Rosetta::myNewCSVFile):

C1x,C2y,C3z,C4a,C5b 
1x,5y,9z,13a,17b  
2x,6y,10z,14a,18b 
3x,7y,11z,15a,19b 
4x,8y,12z,16a,20b 


Erlang

<lang Erlang> -module( csv_data ).

-export( [change/2, from_binary/1, from_file/1, into_file/2, task/0] ).

change( CSV, Changes ) -> lists:foldl( fun change_foldl/2, CSV, Changes ).

from_binary( Binary ) ->

       Lines = binary:split( Binary, <<"\n">>, [global] ),
       [binary:split(X, <<",">>, [global]) || X <- Lines].

from_file( Name ) ->

       {ok, Binary} = file:read_file( Name ),
       from_binary( Binary ).

into_file( Name, CSV ) ->

       Binaries = join_binaries( [join_binaries(X, <<",">>) || X <- CSV], <<"\n">> ),
       file:write_file( Name, Binaries ).

task() ->

      CSV = from_file( "CSV_file.in" ),
      New_CSV = change( CSV, [{2,3,<<"23">>}, {4,4,<<"44">>}] ),
      into_file( "CSV_file.out", New_CSV ).


change_foldl( {Row_number, Column_number, New}, Acc ) ->

       {Row_befores, [Row_columns | Row_afters]} = split( Row_number, Acc ),
       {Column_befores, [_Old | Column_afters]} = split( Column_number, Row_columns ),
       Row_befores ++ [Column_befores ++ [New | Column_afters]] ++ Row_afters.

join_binaries( Binaries, Binary ) ->

       [_Last | Rest] = lists:reverse( lists:flatten([[X, Binary] || X <- Binaries]) ),
       lists:reverse( Rest ).

split( 1, List ) -> {[], List}; split( N, List ) -> lists:split( N - 1, List ). </lang>

Output:

Contents of "CSV_file.out"

C1,C2,C3,C4,C5
1,5,23,13,17
2,6,10,14,18
3,7,11,44,19
4,8,12,16,20


F#

Translation of: C#

<lang fsharp>open System
open System.Collections.Generic
open System.IO

type Csv = class

   val private _data : Dictionary<int * int, string>
   val mutable private _rows : int
   val mutable private _cols : int
    new () =
        { _data = new Dictionary<int * int, string>();
           _rows = 0; _cols = 0 }

   member this.Rows with get () = this._rows
   member this.Cols with get () = this._cols
   member this.Item
       with get(row,col) =
           match this._data.TryGetValue((row,col)) with
           | (true, s) -> s | _ -> ""
       and  set(row,col) value =
           this._data.[(row,col)] <- value
           this._rows <- Math.Max(this._rows, row + 1)
           this._cols <- Math.Max(this._cols, col + 1)
   member this.Open(stream : StreamReader, delim : char) =
       this._data.Clear()
       this._cols <- 0
       this._rows <- 0
       Seq.unfold (
           fun (s : StreamReader) -> if s.EndOfStream then None else Some(s.ReadLine(), s)) stream
       |> Seq.iteri (
           fun i line ->
               line.Split(delim)
               |> Array.iteri (fun j item -> this.[i,j] <- item))
       stream.Close()
   member this.Open(stream : StreamReader) =
       this.Open(stream, ',')
   member this.Save(writer : TextWriter, delim : char) =
       for row = 0 to this._rows - 1  do
           String.Join(delim.ToString(),
               [0 .. this._cols - 1]
               |> List.map(fun col -> this.[row,col])
               |> List.toArray)
           |> writer.WriteLine
   member this.Save(writer : TextWriter) =
       this.Save(writer, ',')

end


[<EntryPoint>]
let main argv =

    let csv = new Csv()
    csv.Open(new StreamReader(argv.[0]))
   csv.[0, 0] <- "Column0"
   csv.[1, 1] <- "100"
   csv.[2, 2] <- "200"
   csv.[3, 3] <- "300"
   csv.[4, 4] <- "400"
   csv.Save(Console.Out)
   0</lang>
Output:
>Rosetta.exe test_in.csv
Column0,C2,C3,C4,C5
1,100,9,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

Go

<lang go>package main

import (
    "encoding/csv"
    "io"
    "log"
    "os"
    "strconv"
)

func init() { log.SetFlags(log.Lshortfile) }

func main() {
    // Open the sample file given.
    csvFile, err := os.Open("sample.csv")

    // Exit on error.
    if err != nil {
        log.Fatal("Error opening sample csv file:", err)
    }

    // Make sure the file is closed before the function returns.
    defer csvFile.Close()

    // Create a new csv reader for the file.
    csvReader := csv.NewReader(csvFile)

    // Create an output file.
    outputFile, err := os.Create("output.csv")
    if err != nil {
        log.Fatal("Error creating output file:", err)
    }
    defer outputFile.Close()

    csvWriter := csv.NewWriter(outputFile)
    defer csvWriter.Flush()

    // For each row in the data.
    for i := 0; ; i++ {
        record, err := csvReader.Read()
        if err == io.EOF {
            break
        }
        if err != nil {
            log.Fatal("Error reading record:", err)
        }

        // Skip header row.
        if i == 0 {
            err = csvWriter.Write(record)
            if err != nil {
                log.Fatal("Error writing record to output file:", err)
            }
            continue
        }

        // For each cell in the row.
        for cell := range record {
            // Convert value to integer for manipulation.
            v, err := strconv.Atoi(record[cell])
            if err != nil {
                log.Fatal("Error parsing cell value:", err)
            }

            // Do something to the value.
            v += 1
            // Store the new value back in the record variable.
            record[cell] = strconv.Itoa(v)
        }

        // Write modified record to disk.
        err = csvWriter.Write(record)
        if err != nil {
            log.Fatal("Error writing record to output file:", err)
        }
    }
}</lang>
Output:
<lang>C1,C2,C3,C4,C5
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20
5,9,13,17,21</lang>

Groovy

<lang groovy>def csv = []
def loadCsv = { source -> source.splitEachLine(/,/) { csv << it.collect { it } } }
def saveCsv = { target -> target.withWriter { writer -> csv.each { writer.println it.join(',') } } }

loadCsv new File('csv.txt')
csv[0][0] = 'Column0'
(1..4).each { i -> csv[i][i] = i * 100 }
saveCsv new File('csv_out.txt')</lang>

csv_out.txt:

Column0,C2,C3,C4,C5
1,100,9,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

Haskell

<lang haskell>import Data.Array
import Data.Maybe (isJust)
import Data.List (intercalate)
import Control.Monad (when)

delimiters = ",;:"

fields [] = []
fields xs = let (item, rest) = break (`elem` delimiters) xs
                (_,    next) = break (`notElem` delimiters) rest
            in item : fields next

unfields Nothing = []
unfields (Just a) = every fieldNumber $ elems a
    where
        ((_, _), (_, fieldNumber)) = bounds a
        every _ [] = []
        every n xs = let (y, z) = splitAt n xs
            in intercalate "," y : every n z
               

fieldArray [] = Nothing
fieldArray xs = Just $ listArray ((1,1), (length xs, length $ head xs))
    $ concat xs

fieldsFromFile = fmap (fieldArray . map fields . lines) . readFile

fieldsToFile f = writeFile f . unlines . unfields

someChanges = fmap (// [((1,1), "changed"), ((3,4), "altered"),

   ((5,2), "modified")])

main = do

   a <- fieldsFromFile "example.txt"
   when (isJust a) $ fieldsToFile "output.txt" $ someChanges a

</lang>

Icon and Unicon

This version only works in Unicon, but can be easily adapted to work in Icon.

<lang unicon>import Utils # To get CSV procedures

procedure main(A)

   f := open(A[1]) | &input
   i := 1
   write(!f)   # header line(?)
   every csv := parseCSV(!f) do {
       csv[i+:=1] *:= 100
       write(encodeCSV(csv))
       }

end</lang>

Sample run:

->csv csv.dat
C1,C2,C3,C4,C5
1,500,9,13,17
2,6,1000,14,18
3,7,11,1500,19
4,8,12,16,2000
->

J

Like other languages it is not necessary to use the csv utilities to accomplish this task.

<lang j> data=: (','&splitstring);.2 freads 'rc_csv.csv' NB. read and parse data

  data=: (<'"spam"') (<2 3)} data                       NB. amend cell in 3rd row, 4th column (0-indexing)
  'rc_outcsv.csv' fwrites~ ;<@(','&joinstring"1) data   NB. format and write out amended data</lang>

Using the delimiter-separated-values utilities (of which tables/csv is a special case) will handle more complex csv constructs:

<lang j> require 'tables/csv'

  data=: makenum readcsv 'rc_csv.csv'  NB. read data and convert cells to numeric where possible
  data=:  (<'spam') (2 3;3 0)} data    NB. amend 2 cells
  data writecsv 'rc_outcsv.csv'        NB. write out amended data. Strings are double-quoted</lang>

Java

<lang java>import java.io.*;
import java.awt.Point;
import java.util.HashMap;
import java.util.Scanner;

public class CSV {

    private HashMap<Point, String> _map = new HashMap<Point, String>();
   private int _cols;
   private int _rows;
   public void open(File file) throws FileNotFoundException, IOException {
       open(file, ',');
   }
   public void open(File file, char delimiter)
           throws FileNotFoundException, IOException {
        Scanner scanner = new Scanner(file);
       scanner.useDelimiter(Character.toString(delimiter));
       clear();
       while(scanner.hasNextLine()) {
           String[] values = scanner.nextLine().split(Character.toString(delimiter));
           int col = 0;
           for ( String value: values ) {
                _map.put(new Point(col, _rows), value);
               _cols = Math.max(_cols, ++col);
           }
           _rows++;
       }
       scanner.close();
   }
   public void save(File file) throws IOException {
       save(file, ',');
   }
   public void save(File file, char delimiter) throws IOException {
        FileWriter fw = new FileWriter(file);
        BufferedWriter bw = new BufferedWriter(fw);
       for (int row = 0; row < _rows; row++) {
           for (int col = 0; col < _cols; col++) {
                Point key = new Point(col, row);
               if (_map.containsKey(key)) {
                   bw.write(_map.get(key));
               }
               if ((col + 1) < _cols) {
                   bw.write(delimiter);
               }
           }
            bw.newLine();
       }
       bw.flush();
       bw.close();
   }
   public String get(int col, int row) {
       String val = "";
        Point key = new Point(col, row);
       if (_map.containsKey(key)) {
           val = _map.get(key);
       }
       return val;
   }
   public void put(int col, int row, String value) {
        _map.put(new Point(col, row), value);
       _cols = Math.max(_cols, col+1);
       _rows = Math.max(_rows, row+1);
   }
   public void clear() {
       _map.clear();
       _cols = 0;
       _rows = 0;
   }
   public int rows() {
       return _rows;
   }
   public int cols() {
       return _cols;
   }
   public static void main(String[] args) {
       try {
            CSV csv = new CSV();
            csv.open(new File("test_in.csv"));
           csv.put(0, 0, "Column0");
           csv.put(1, 1, "100");
           csv.put(2, 2, "200");
           csv.put(3, 3, "300");
           csv.put(4, 4, "400");
            csv.save(new File("test_out.csv"));
       } catch (Exception e) {
       }
   }

}</lang>

Output (in test_out.csv):
Column0,C2,C3,C4,C5
1,100,9,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

Lua

Adds a SUM column.
<lang lua>local csv={}
for line in io.lines('file.csv') do

   table.insert(csv, {})
   local i=1
   for j=1,#line do
       if line:sub(j,j) == ',' then
           table.insert(csv[#csv], line:sub(i,j-1))
           i=j+1
       end
   end
   table.insert(csv[#csv], line:sub(i,j))

end

table.insert(csv[1], 'SUM')
for i=2,#csv do

   local sum=0
   for j=1,#csv[i] do
       sum=sum + tonumber(csv[i][j])
   end
   if sum>0 then
       table.insert(csv[i], sum)
   end

end

local newFileData = ''
for i=1,#csv do

    newFileData=newFileData .. table.concat(csv[i], ',') .. '\n'

end

local file=io.open('file.csv', 'w')
file:write(newFileData)
</lang>

Output:
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

Mathematica

Mathematica's Import and Export functions support CSV files.
<lang mathematica>iCSV=Import["test.csv"]
->{{"C1","C2","C3","C4","C5"},{1,5,9,13,17},{2,6,10,14,18},{3,7,11,15,19},{4,8,12,16,20}}
iCSV[[1, 1]] = Column0;
iCSV[[2, 2]] = 100;
iCSV[[3, 3]] = 200;
iCSV[[4, 4]] = 300;
iCSV[[5, 5]] = 400;
iCSV[[2, 3]] = 60;
Export["test.csv",iCSV];</lang>

Output:
Column0,C2,C3,C4,C5
1,100,60,13,17
2,6,200,14,18
3,7,11,300,19
4,8,12,16,400

MATLAB / Octave

<lang Matlab>filename='data.csv';
fid = fopen(filename);
header = fgetl(fid);
fclose(fid);
X = dlmread(filename,',',1,0);

fid = fopen('data.out.csv','w+');
fprintf(fid,'%s,sum\n',header);
for k=1:size(X,1),
  fprintf(fid,"%i,",X(k,:));
  fprintf(fid,"%i\n",sum(X(k,:)));
end;
fclose(fid);</lang>

Perl

For simple files, you can use split:
<lang perl>#!/usr/bin/perl
use warnings;
use strict;

use List::Util 'sum';

my @header = split /,/, <>;

# Remove the newline.

chomp $header[-1];

my %column_number;
for my $i (0 .. $#header) {
    $column_number{$header[$i]} = $i;
}
my @rows = map [ split /,/ ], <>;
chomp $_->[-1] for @rows;

# Add 1 to the numbers in the 2nd column:

$_->[1]++ for @rows;

# Add C1 into C4:

$_->[ $column_number{C4} ] += $_->[ $column_number{C1} ] for @rows;

# Add sums to both rows and columns.

push @header, 'Sum';
$column_number{Sum} = $#header;

push $_, sum(@$_) for @rows;
push @rows, [
               map  {
                   my $col = $_;
                   sum(map $_->[ $column_number{$col} ], @rows);
               } @header
           ];
# Print the output.

print join(',' => @header), "\n";
print join(',' => @$_), "\n" for @rows;</lang>

However, if the CSV can contain quoted text (the kind MS Excel produces), you should rather use the Text::CSV module. Only reading the data and printing the result is different:
<lang perl>#!/usr/bin/perl
use warnings;
use strict;

use Text::CSV;
use List::Util 'sum';

my $csv = 'Text::CSV'->new({eol => "\n"})
          or die 'Cannot use CSV: ' . 'Text::CSV'->error_diag;

my $file = shift;
my @rows;
open my $FH, '<', $file or die "Cannot open $file: $!";
my @header = @{ $csv->getline($FH) };
while (my $row = $csv->getline($FH)) {
    push @rows, $row;
}
$csv->eof or $csv->error_diag;

# The processing is the same.
# Print the output.

$csv->print(*STDOUT, $_) for \@header, @rows;</lang>


Perl 6

On the face of it this task is pretty simple, especially given the sample CSV file and the total lack of specification of what changes to make to the file. Something like this would suffice:
<lang perl6>my $csvfile = './whatever.csv';
my $fh = open($csvfile, :r);
my @header = $fh.get.split(',');
my @csv = map {[.split(',')]}, $fh.lines;
close $fh;

my $out = open($csvfile, :w);
$out.say((@header,'SUM').join(','));
$out.say((@$_, [+] @$_).join(',')) for @csv;
close $out;</lang>
But if your CSV file is at all complex you are better off using a CSV parsing module. (Complex meaning fields that contain commas, quotes, newlines, etc.)
<lang perl6>use Text::CSV;
my $csvfile = './whatever.csv';
my @csv = Text::CSV.parse-file($csvfile);
modify(@csv); # do whatever;
csv-write-file( @csv, :file($csvfile) );</lang>

PicoLisp

<lang PicoLisp>(in "data.csv"

  (prinl (line) "," "SUM")
  (while (split (line) ",")
     (prinl (glue "," @) "," (sum format @)) ) )</lang>

Output:

C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

PL/I

<lang pli>*process source xref attributes or(!);

csv: Proc Options(Main);
/*********************************************************************
* 19.10.2013 Walter Pachl
* 'erase d:\csv.out'
* 'set dd:in=d:\csv.in,recsize(300)'
* 'set dd:out=d:\csv.out,recsize(300)'
* Say 'Input:'
* 'type csv.in'
* 'csv'
* Say ' '
* Say 'Output:'
* 'type csv.out'
*********************************************************************/
Dcl in  Record Input;
Dcl out Record Output;
On Endfile(in) Goto part2;
Dcl (INDEX,LEFT,SUBSTR,TRIM) Builtin;
Dcl (i,j,p,m,n) Bin Fixed(31) Init(0);
Dcl s Char(100) Var;
Dcl iline(10) Char(100) Var;
Dcl a(20,20) Char(10) Var;
Dcl sum Dec Fixed(3);
Dcl oline Char(100) Var;
Do i=1 By 1;
  Read File(in) Into(s);
  iline(i)=s;
  m=i;
  Call sep((s));
  End;
part2:
Do i=1 To m;
  If i=1 Then
    oline=iline(1)!!','!!'SUM';
  Else Do;
    sum=0;
    Do j=1 To n;
      sum=sum+a(i,j);
      End;
    oline=iline(i)!!','!!trim(sum);
    End;
  Write File(out) From(oline);
  End;
sep: Procedure(line);
Dcl line Char(*) Var;
loop:
Do j=1 By 1;
  p=index(line,',');
  If p>0 Then Do;
    a(i,j)=left(line,p-1);
    line=substr(line,p+1);
    End;
  Else Do;
    a(i,j)=line;
    Leave loop;
    End;
  End;
n=j;
End;
End;</lang>

Output:

Input:
C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20

Output:
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60                           

Prolog

Add a "SUM" column. Output is as for Lua and is not repeated here.

The following uses SWI-Prolog's csv_read_file_row/3 in order to demonstrate that it is not necessary to read more than a line at a time.
<lang Prolog>test :- augment('test.csv', 'test.out.csv').

% augment( +InFileName, +OutFileName)
augment(InFile, OutFile)  :-
    open(OutFile, write, OutStream),
    ( ( csv_read_file_row(InFile, Row, [line(Line)]),
        % Row is of the form row( Item1, Item2, ....).
        addrow(Row, Out),
        csv_write_stream(OutStream, [Out], []),
        fail
      )
      ; close(OutStream)
    ).

% If the first item in a row is an integer, then append the sum;
% otherwise append 'SUM':
addrow( Term, NewTerm ) :-
    Term =.. [F | List],
    List = [X|_],
    (integer(X) -> sum_list(List, Sum) ; Sum = 'SUM'),
    append(List, [Sum], NewList),
    NewTerm =.. [F | NewList].
</lang>

Python

Note that the csv module is not required for such a simple and regular csv file.
<lang python>import fileinput

changerow, changecolumn, changevalue = 2, 4, '"Spam"'

with fileinput.input('csv_data_manipulation.csv', inplace=True) as f:

   for line in f:
       if fileinput.filelineno() == changerow:
           fields = line.rstrip().split(',')
           fields[changecolumn-1] = changevalue
           line = ','.join(fields) + '\n'
        print(line, end='')</lang>
Output:

After this the data file csv_data_manipulation.csv gets changed from that of the task to:

C1,C2,C3,C4,C5
1,5,9,"Spam",17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20
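
For the second suggestion in the preamble, here is a minimal sketch using Python's standard csv module to add the SUM column; the input and output file names are assumptions chosen for illustration, not part of the task.
<lang python>import csv

# Assumed file names; adjust to the actual data file.
with open('csv_data_manipulation.csv', newline='') as f:
    rows = list(csv.reader(f))

header, data = rows[0], rows[1:]
header.append('SUM')                              # new column heading
for row in data:
    row.append(str(sum(int(x) for x in row)))     # append each row's sum

with open('csv_data_manipulation_out.csv', 'w', newline='') as f:
    csv.writer(f).writerows([header] + data)</lang>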

Racket

<lang racket>#lang racket
(require (planet neil/csv:1:=7) net/url)

(define make-reader

 (make-csv-reader-maker
  '((separator-chars              #\,)
    (strip-leading-whitespace?  . #t)
    (strip-trailing-whitespace? . #t))))

(define (all-rows port)

 (define read-row (make-reader port))
 (define head (append (read-row) '("SUM")))
 (define rows (for/list ([row (in-producer read-row '())])
                (define xs (map string->number row))
                (append row (list (~a (apply + xs))))))
 (define (->string row) (string-join row "," #:after-last "\n"))
 (string-append* (map ->string (cons head rows))))</lang>

Example:
<lang racket>(define csv-file

 "C1, C2, C3, C4, C5
   1,  5,  9, 13, 17
   2,  6, 10, 14, 18
   3,  7, 11, 15, 19
   4,  8, 12, 16, 20")

(display (all-rows (open-input-string csv-file)))</lang>

Output:
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

REXX

<lang rexx>/* REXX ***************************************************************
* extend in.csv to add a column containing the sum of the lines' elems
* 21.06.2013 Walter Pachl
**********************************************************************/
csv='in.csv'
Do i=1 By 1 While lines(csv)>0

 l=linein(csv)
 If i=1 Then
   l.i=l',SUM'
 Else Do
   ol=l
   sum=0
    Do While l<>''
     Parse Var l e ',' l
     sum=sum+e
     End
   l.i=ol','sum
   End
 End

Call lineout csv
'erase' csv
Do i=1 To i-1

 Call lineout csv,l.i
 End</lang>
Output:
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

Ruby

<lang ruby>require 'csv'

# read:

ar = CSV.table("test.csv").to_a #table method assumes headers and converts numbers if possible.

# manipulate:

ar.first << "SUM"
ar[1..-1].each{|row| row << row.inject(:+)}

# write:

CSV.open("out.csv", 'w') do |csv|

 ar.each{|line| csv << line}

end</lang>

Output:
c1,c2,c3,c4,c5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

Run BASIC

<lang runbasic>csv$ = "C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20
"

print csv$
dim csvData$(5,5)

for r = 1 to 5

 a$ = word$(csv$,r,chr$(13))
 for c = 1 to 5
   csvData$(r,c) = word$(a$,c,",")
 next c

next r

[loop]
input "Row to change:";r
input "Col to change;";c
if r > 5 or c > 5 then
 print "Row ";r;" or Col ";c;" is greater than 5"
 goto [loop]
end if
input "Change Row ";r;" Col ";c;" from ";csvData$(r,c);" to ";d$
csvData$(r,c) = d$
for r = 1 to 5

 for c = 1 to 5
   print cma$;csvData$(r,c);
   cma$ = ","
  next c
  cma$ = ""
  print

next r</lang>

C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20

Row to change:?4
Col to change;?4
Change Row 4 Col 4 from 15 to ?99
C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,99,19
4,8,12,16,20

Seed7

The program below assumes that the input file has the name csvDataManipulation.in and is in the same directory as the program.

<lang seed7>$ include "seed7_05.s7i";

const proc: main is func

 local
   var file: input is STD_NULL;
   var array array string: csvData is 0 times 0 times "";
   var integer: line is 0;
 begin
   input := open(dir(PROGRAM) & "/csvDataManipulation.in", "r");
   while hasNext(input) do
     csvData &:= split(getln(input), ",");
   end while;
   csvData[3][3] := "X";
   for key line range csvData do
     writeln(join(csvData[line], ","));
   end for;
 end func;</lang>
Output:
C1,C2,C3,C4,C5
1,5,9,13,17
2,6,X,14,18
3,7,11,15,19
4,8,12,16,20

Tcl

Library: Tcllib (Package: struct::matrix)
Library: Tcllib (Package: csv)

<lang tcl>package require struct::matrix
package require csv

proc addSumColumn {filename {title "SUM"}} {

   set m [struct::matrix]
   # Load the CSV in
   set f [open $filename]
   csv::read2matrix $f $m "," auto
   close $f
   # Add the column with the sums
   set sumcol [$m columns]
   $m add column $title
   for {set i 1} {$i < [$m rows]} {incr i} {

        # Fill out a dummy value
        $m set cell $sumcol $i 0
        $m set cell $sumcol $i [tcl::mathop::+ {*}[$m get row $i]]

   }
   # Write the CSV out
   set f [open $filename w]
   csv::writematrix $m $f
   close $f
   $m destroy

}

addSumColumn "example.csv"</lang>

Output (in example.csv):
C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

TUSCRIPT

<lang tuscript>
$$ MODE DATA
$$ csv=*
C1,C2,C3,C4,C5
1,5,9,13,17
2,6,10,14,18
3,7,11,15,19
4,8,12,16,20
$$ MODE TUSCRIPT
LOOP/CLEAR n,line=csv

IF (n==1) THEN
 line=CONCAT (line,",SUM")
ELSE
 lineadd=EXCHANGE(line,":,:':")
 sum=SUM(lineadd)
 line=JOIN(line,",",sum)
ENDIF
csv=APPEND(csv,line)

ENDLOOP
</lang>
Output:

C1,C2,C3,C4,C5,SUM
1,5,9,13,17,45
2,6,10,14,18,50
3,7,11,15,19,55
4,8,12,16,20,60

Vedit macro language

This example adds 100 to the values in each cell at row n+1, column n.
<lang vedit>File_Open("input.csv")
for (#1 = 0; #1 < 4; #1++) {

   Goto_Line(#1+2)                             // line (starting from line 2)
   if (#1) {
       Search(",", ADVANCE+COUNT, #1)          // column
   }
   #2 = Num_Eval()                             // #2 = old value
   Del_Char(Chars_Matched)                     // delete old value
    Num_Ins(#2+100, LEFT+NOCR)                  // write new value

}
File_Save_As("output.csv", OK+NOMSG)</lang>
output.csv:

C1,C2,C3,C4,C5
101,5,9,13,17
2,106,10,14,18
3,7,111,15,19
4,8,12,116,20