c# deserialize multiple JSON responses into the same list
I have an API link that returns this JSON structure. In my code I request that API link and deserialize the response into a list, which works fine. However, if the API returns more than 50 "counts", it creates another page. How can I loop through all the pages and add everything to the existing list?
In the case I linked, there will be 38 pages. All of them need to be added to the list.
The call:
// spidyApiUrl = http://www.gw2spidy.com/api/v0.9/json/item-search/iron/1
var spidyApi_idByName_result = api_Handler.objFromApi_idToName(spidyApiUrl);
The function with the return:
public RootObject objFromApi_idToName(string url)
{
    HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
    WebResponse response = request.GetResponse();
    using (Stream responseStream = response.GetResponseStream())
    {
        StreamReader reader = new StreamReader(responseStream, Encoding.UTF8);
        var jsonReader = new JsonTextReader(reader);
        var serializer = new JsonSerializer();
        //return serializer.Deserialize<RootObject>(jsonReader);
        RootObject rootObject = serializer.Deserialize<RootObject>(jsonReader);
        if (rootObject.count > 0) { return rootObject; }
        else { return null; }
    }
}
Of course I also have the classes with the get; set; properties.
How do I loop through all the pages (only when more than one page exists, which is not always the case) and add them all to the same object list?
You need to keep downloading data until page == last_page. As you receive each page of data, you add the new results set to the original rootObject's results property with AddRange.
I also changed the URL passed into the function from
http://www.gw2spidy.com/api/v0.9/json/item-search/iron/1
to
http://www.gw2spidy.com/api/v0.9/json/item-search/iron
This allows me to append the page number to the URL and fetch all the pages:
http://www.gw2spidy.com/api/v0.9/json/item-search/iron/1
http://www.gw2spidy.com/api/v0.9/json/item-search/iron/2
.....
http://www.gw2spidy.com/api/v0.9/json/item-search/iron/38
Code:
public class Result
{
    public int data_id { get; set; }
    public string name { get; set; }
    public int rarity { get; set; }
    public int restriction_level { get; set; }
    public string img { get; set; }
    public int type_id { get; set; }
    public int sub_type_id { get; set; }
    public string price_last_changed { get; set; }
    public int max_offer_unit_price { get; set; }
    public int min_sale_unit_price { get; set; }
    public int offer_availability { get; set; }
    public int sale_availability { get; set; }
    public int sale_price_change_last_hour { get; set; }
    public int offer_price_change_last_hour { get; set; }
}
public class RootObject
{
    public int count { get; set; }
    public int page { get; set; }
    public int last_page { get; set; }
    public int total { get; set; }
    public List<Result> results { get; set; }
}
class Program
{
    static void Main(string[] args)
    {
        objFromApi_idToName("http://www.gw2spidy.com/api/v0.9/json/item-search/iron");
    }
    public static RootObject objFromApi_idToName(string url)
    {
        RootObject rootObject = null;
        RootObject tempRootObject = null;
        int page = 1;
        // Request one page at a time until the last page has been merged in.
        do
        {
            HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url + "/" + page);
            WebResponse response = request.GetResponse();
            using (Stream responseStream = response.GetResponseStream())
            {
                StreamReader reader = new StreamReader(responseStream, Encoding.UTF8);
                var jsonReader = new JsonTextReader(reader);
                var serializer = new JsonSerializer();
                tempRootObject = serializer.Deserialize<RootObject>(jsonReader);
                if (rootObject == null)
                {
                    // First page: use it as the accumulator.
                    rootObject = tempRootObject;
                }
                else
                {
                    // Subsequent pages: append their results to the accumulator.
                    rootObject.results.AddRange(tempRootObject.results);
                    rootObject.count += tempRootObject.count;
                }
            }
            page++;
        } while (tempRootObject != null && tempRootObject.last_page != tempRootObject.page);
        return rootObject;
    }
}
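For reference, a small usage sketch (not part of the original answer) showing how the merged result from objFromApi_idToName above might be consumed; the console output format is just illustrative:
// Fetch every page and work with the merged list.
RootObject allItems = objFromApi_idToName("http://www.gw2spidy.com/api/v0.9/json/item-search/iron");
Console.WriteLine("Total results collected: " + allItems.results.Count);
foreach (Result item in allItems.results)
{
    Console.WriteLine(item.data_id + ": " + item.name + " (min sale price " + item.min_sale_unit_price + ")");
}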
Are you using Web API? If so, could you try something like this?
public async Task<RootObject> objFromApi_idToName(string url)
{
    using (var client = new HttpClient())
    {
        client.BaseAddress = new Uri("<your uri here>");
        client.DefaultRequestHeaders.Accept.Clear();
        client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));
        HttpResponseMessage response = await client.GetAsync("<uri extension here>");
        if (response.IsSuccessStatusCode)
        {
            string jsonStr = await response.Content.ReadAsStringAsync();
            var deserializedResponse = JsonConvert.DeserializeObject<RootObject>(jsonStr);
            return deserializedResponse;
        }
        return null;
    }
}
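This HttpClient version does not yet deal with the paging from the question. Below is a minimal sketch, not from the original answers, of how the same page == last_page loop could be combined with HttpClient and async/await, reusing the RootObject class from the question; the names SpidyApiClient and GetAllPagesAsync and the error handling are illustrative assumptions:
using System;
using System.Collections.Generic;
using System.Net.Http;
using System.Net.Http.Headers;
using System.Threading.Tasks;
using Newtonsoft.Json;

public static class SpidyApiClient
{
    // Downloads every page of an item search and merges the results into one RootObject.
    // Sketch only: SpidyApiClient and GetAllPagesAsync are not names from the original post.
    public static async Task<RootObject> GetAllPagesAsync(string baseUrl)
    {
        using (var client = new HttpClient())
        {
            client.DefaultRequestHeaders.Accept.Clear();
            client.DefaultRequestHeaders.Accept.Add(new MediaTypeWithQualityHeaderValue("application/json"));

            RootObject merged = null;
            RootObject page = null;
            int pageNumber = 1;

            do
            {
                // e.g. http://www.gw2spidy.com/api/v0.9/json/item-search/iron/1, /2, ...
                HttpResponseMessage response = await client.GetAsync(baseUrl + "/" + pageNumber);
                response.EnsureSuccessStatusCode();

                string json = await response.Content.ReadAsStringAsync();
                page = JsonConvert.DeserializeObject<RootObject>(json);

                if (merged == null)
                {
                    merged = page;                           // first page becomes the accumulator
                }
                else
                {
                    merged.results.AddRange(page.results);   // append later pages
                    merged.count += page.count;
                }

                pageNumber++;
            } while (page != null && page.page != page.last_page);

            return merged;
        }
    }
}
It could then be called with something like var all = await SpidyApiClient.GetAllPagesAsync("http://www.gw2spidy.com/api/v0.9/json/item-search/iron");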